author     Ben Skeggs <bskeggs@redhat.com>    2012-07-20 08:17:34 +1000
committer  Ben Skeggs <bskeggs@redhat.com>    2012-10-03 13:12:56 +1000
commit     ebb945a94bba2ce8dff7b0942ff2b3f2a52a0a69 (patch)
tree       07cad59be501458e6ae1304b7c0352e322ac3387 /drivers/gpu/drm
parent     ac1499d9573f4aadd1d2beac11fe23af8ce90c24 (diff)
drm/nouveau: port all engines to new engine module format
This is a HUGE commit, but it's not nearly as bad as it looks - any problems can be isolated to a particular chipset and engine combination. It was simply too difficult to port each one at a time, the compat layers are *already* ridiculous.

Most of the changes here are simply to the glue, the process for each of the engine modules was to start with a standard skeleton and copy+paste the old code into the appropriate places, fixing up variable names etc as needed.

v2: Marcin Slusarz <marcin.slusarz@gmail.com>
- fix find/replace bug in license header

v3: Ben Skeggs <bskeggs@redhat.com>
- bump indirect pushbuf size to 8KiB, 4KiB barely enough for userspace and left no space for kernel's requirements during GEM pushbuf submission.
- fix duplicate assignments noticed by clang

v4: Marcin Slusarz <marcin.slusarz@gmail.com>
- add sparse annotations to nv04_fifo_pause/nv04_fifo_start
- use ioread32_native/iowrite32_native for fifo control registers

v5: Ben Skeggs <bskeggs@redhat.com>
- rebase on v3.6-rc4, modified to keep copy engine fix intact
- nv10/fence: unmap fence bo before destroying
- fixed fermi regression when using nvidia gr fuc
- fixed typo in supported dma_mask checking

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
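For orientation, every ported engine in this series follows the same "standard skeleton" the message refers to: a private struct wrapping the engine base object, a constructor that sets the PMC enable bit, interrupt handler and context/object classes, and a nouveau_oclass exposing ctor/dtor/init/fini. Below is a condensed sketch based on the nv84 BSP engine later in this patch; the context class, interrupt handler body and the dtor/init/fini implementations are elided here but appear in full in the diff.

	/* Condensed sketch of the new engine module skeleton (from nv84 BSP).
	 * nv84_bsp_intr, nv84_bsp_cclass, nv84_bsp_sclass and the remaining
	 * ofuncs are defined alongside this in the complete file below.
	 */
	#include <core/os.h>
	#include <core/class.h>
	#include <core/engctx.h>
	#include <engine/bsp.h>

	struct nv84_bsp_priv {
		struct nouveau_bsp base;	/* engine base object */
	};

	static int
	nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		      struct nouveau_oclass *oclass, void *data, u32 size,
		      struct nouveau_object **pobject)
	{
		struct nv84_bsp_priv *priv;
		int ret;

		/* allocate and initialise the base engine object */
		ret = nouveau_bsp_create(parent, engine, oclass, &priv);
		*pobject = nv_object(priv);
		if (ret)
			return ret;

		/* PMC enable bit, interrupt handler, context/object classes */
		nv_subdev(priv)->unit = 0x04008000;
		nv_subdev(priv)->intr = nv84_bsp_intr;
		nv_engine(priv)->cclass = &nv84_bsp_cclass;
		nv_engine(priv)->sclass = nv84_bsp_sclass;
		return 0;
	}

	struct nouveau_oclass
	nv84_bsp_oclass = {
		.handle = NV_ENGINE(BSP, 0x84),
		.ofuncs = &(struct nouveau_ofuncs) {
			.ctor = nv84_bsp_ctor,
			.dtor = nv84_bsp_dtor,
			.init = nv84_bsp_init,
			.fini = nv84_bsp_fini,
		},
	};

Each chipset-specific variant (fifo, graph, mpeg, crypt, copy, and so on) is the same shape; the per-chipset differences live in the register pokes inside init/fini and the interrupt handler.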
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 99
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/ramht.c | 297
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c | 157
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 268
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | 350
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 252
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 255
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | 90
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 125
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | 87
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 176
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 168
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 165
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 736
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h | 178
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 228
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 279
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 394
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 554
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 490
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 604
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 594
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctx.h | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c | 559
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c | 202
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 1325
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 1053
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 1072
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.h | 31
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv25.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c | 134
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv30.c | 238
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv34.c | 168
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv35.c | 166
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 681
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 907
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 1074
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 843
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/regs.h | 269
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 448
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | 305
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 154
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 128
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 198
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 180
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nv84.c | 157
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/ramht.h | 56
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/bsp.h | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/copy.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/crypt.h | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | 57
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 123
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/graph.h | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/mpeg.h | 61
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/ppp.h | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/software.h | 58
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/vp.h | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/instmem.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/vm.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/base.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv04.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/base.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 421
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 347
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 387
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 408
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_compat.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_compat.h | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 195
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 56
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 211
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 407
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 225
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 170
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gpuobj.c | 518
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 151
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_revcompat.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_revcompat.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_software.h | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 391
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 163
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_util.h | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 37
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_software.c | 139
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 63
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_pm.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 174
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_software.c | 181
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fbcon.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 102
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_software.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 11
146 files changed, 14219 insertions, 11099 deletions
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 1855699a1ef1..d8f3ad4285d2 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -4,9 +4,11 @@
ccflags-y := -Iinclude/drm -DCONFIG_NOUVEAU_DEBUG=7 -DCONFIG_NOUVEAU_DEBUG_DEFAULT=3
ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)/core
ccflags-y += -I$(src)
nouveau-y := core/core/client.o
+nouveau-y += core/core/engctx.o
nouveau-y += core/core/engine.o
nouveau-y += core/core/enum.o
nouveau-y += core/core/gpuobj.o
@@ -90,12 +92,20 @@ nouveau-y += core/subdev/vm/nv44.o
nouveau-y += core/subdev/vm/nv50.o
nouveau-y += core/subdev/vm/nvc0.o
+nouveau-y += core/engine/dmaobj/base.o
+nouveau-y += core/engine/dmaobj/nv04.o
+nouveau-y += core/engine/dmaobj/nv50.o
+nouveau-y += core/engine/dmaobj/nvc0.o
nouveau-y += core/engine/bsp/nv84.o
nouveau-y += core/engine/copy/nva3.o
nouveau-y += core/engine/copy/nvc0.o
nouveau-y += core/engine/crypt/nv84.o
nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/nv04.o
+nouveau-y += core/engine/disp/nv50.o
+nouveau-y += core/engine/disp/nvd0.o
nouveau-y += core/engine/disp/vga.o
+nouveau-y += core/engine/fifo/base.o
nouveau-y += core/engine/fifo/nv04.o
nouveau-y += core/engine/fifo/nv10.o
nouveau-y += core/engine/fifo/nv17.o
@@ -111,41 +121,82 @@ nouveau-y += core/engine/graph/ctxnve0.o
nouveau-y += core/engine/graph/nv04.o
nouveau-y += core/engine/graph/nv10.o
nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv25.o
+nouveau-y += core/engine/graph/nv2a.o
+nouveau-y += core/engine/graph/nv30.o
+nouveau-y += core/engine/graph/nv34.o
+nouveau-y += core/engine/graph/nv35.o
nouveau-y += core/engine/graph/nv40.o
nouveau-y += core/engine/graph/nv50.o
nouveau-y += core/engine/graph/nvc0.o
nouveau-y += core/engine/graph/nve0.o
nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv40.o
nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/mpeg/nv84.o
nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/software/nv04.o
+nouveau-y += core/engine/software/nv10.o
+nouveau-y += core/engine/software/nv50.o
+nouveau-y += core/engine/software/nvc0.o
nouveau-y += core/engine/vp/nv84.o
-nouveau-y += nouveau_drm.o nouveau_compat.o \
- nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
- nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
- nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
- nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
- nouveau_hw.o nouveau_calc.o \
- nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_hdmi.o nouveau_dp.o \
- nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mxm.o nouveau_agp.o \
- nouveau_abi16.o \
- nouveau_bios.o \
- nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o \
- nv04_software.o nv50_software.o nvc0_software.o \
- nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
- nv04_crtc.o nv04_display.o nv04_cursor.o \
- nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
- nv50_cursor.o nv50_display.o \
- nvd0_display.o \
- nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
- nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
- nouveau_prime.o
-
-nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
+# drm/compat - will go away
+nouveau-y += nouveau_compat.o nouveau_revcompat.o
+
+# drm/core
+nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
+nouveau-y += nouveau_agp.o
+nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
+
+nouveau-y += nouveau_abi16.o
+nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+
+# drm/kms/common
+nouveau-y += nouveau_fbcon.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nv04_fbcon.o
+
+# drm/kms/nv50:nvd9
+nouveau-y += nv50_fbcon.o nvc0_fbcon.o
+
+# drm/kms/nvd9-
+
+##
+## unported bits below
+##
+
+# drm/core
+nouveau-y += nouveau_drv.o nouveau_state.o nouveau_irq.o
+nouveau-y += nouveau_prime.o
+
+# drm/kms/bios
+nouveau-y += nouveau_mxm.o nouveau_bios.o
+
+# drm/kms/common
+nouveau-y += nouveau_display.o nouveau_connector.o
+nouveau-y += nouveau_hdmi.o nouveau_dp.o
+
+# drm/kms/nv04:nv50
+nouveau-y += nouveau_hw.o nouveau_calc.o
+nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
+nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
+
+# drm/kms/nv50-
+nouveau-y += nv50_display.o nvd0_display.o
+nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
+nouveau-y += nv50_evo.o
+
+# drm/pm
+nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o
+nouveau-y += nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o
+nouveau-y += nouveau_mem.o
+
+# optional stuff
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+
obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 5c22864fbd2c..86a64045dd60 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,289 +18,92 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
*/
-#include "drmP.h"
-
-#include "nouveau_drv.h"
+#include <core/object.h>
#include <core/ramht.h>
+#include <core/math.h>
+
+#include <subdev/bar.h>
static u32
-nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+nouveau_ramht_hash(struct nouveau_ramht *ramht, int chid, u32 handle)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_ramht *ramht = chan->ramht;
u32 hash = 0;
- int i;
- NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
-
- for (i = 32; i > 0; i -= ramht->bits) {
+ while (handle) {
hash ^= (handle & ((1 << ramht->bits) - 1));
handle >>= ramht->bits;
}
- if (dev_priv->card_type < NV_50)
- hash ^= chan->id << (ramht->bits - 4);
- hash <<= 3;
-
- NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ hash ^= chid << (ramht->bits - 4);
+ hash = hash << 3;
return hash;
}
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
- u32 offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 ctx = nv_ro32(ramht, offset + 4);
-
- if (dev_priv->card_type < NV_40)
- return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
- return (ctx != 0);
-}
-
-static int
-nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
- struct nouveau_gpuobj *ramht, u32 offset)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- u32 ctx = nv_ro32(ramht, offset + 4);
-
- if (dev_priv->card_type >= NV_50)
- return true;
- else if (dev_priv->card_type >= NV_40)
- return chan->id ==
- ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
- else
- return chan->id ==
- ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-}
-
int
-nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
- struct nouveau_gpuobj *gpuobj)
+nouveau_ramht_insert(struct nouveau_ramht *ramht, int chid,
+ u32 handle, u32 context)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_ramht_entry *entry;
- struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
- unsigned long flags;
- u32 ctx, co, ho;
-
- if (nouveau_ramht_find(chan, handle))
- return -EEXIST;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->channel = chan;
- entry->gpuobj = NULL;
- entry->handle = handle;
- nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
-
- if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->addr >> 4) |
- (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else
- if (dev_priv->card_type < NV_50) {
- ctx = (gpuobj->addr >> 4) |
- (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else {
- if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->node->offset << 10) |
- (chan->id << 28) |
- chan->id; /* HASH_TAG */
- } else {
- ctx = (gpuobj->node->offset >> 4) |
- ((gpuobj->engine <<
- NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
- }
- }
-
- spin_lock_irqsave(&chan->ramht->lock, flags);
- list_add(&entry->head, &chan->ramht->entries);
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ u32 co, ho;
- co = ho = nouveau_ramht_hash_handle(chan, handle);
+ co = ho = nouveau_ramht_hash(ramht, chid, handle);
do {
- if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
- NV_DEBUG(dev,
- "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, handle, ctx);
+ if (!nv_ro32(ramht, co + 4)) {
nv_wo32(ramht, co + 0, handle);
- nv_wo32(ramht, co + 4, ctx);
-
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
- nvimem_flush(dev);
- return 0;
+ nv_wo32(ramht, co + 4, context);
+ if (bar)
+ bar->flush(bar);
+ return co;
}
- NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
- chan->id, co, nv_ro32(ramht, co));
co += 8;
- if (co >= ramht->size)
+ if (co >= nv_gpuobj(ramht)->size)
co = 0;
} while (co != ho);
- NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
- list_del(&entry->head);
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
- kfree(entry);
return -ENOMEM;
}
-static struct nouveau_ramht_entry *
-nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
-{
- struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
- struct nouveau_ramht_entry *entry;
- unsigned long flags;
-
- if (!ramht)
- return NULL;
-
- spin_lock_irqsave(&ramht->lock, flags);
- list_for_each_entry(entry, &ramht->entries, head) {
- if (entry->channel == chan &&
- (!handle || entry->handle == handle)) {
- list_del(&entry->head);
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return entry;
- }
- }
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return NULL;
-}
-
-static void
-nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
- unsigned long flags;
- u32 co, ho;
-
- spin_lock_irqsave(&chan->ramht->lock, flags);
- co = ho = nouveau_ramht_hash_handle(chan, handle);
- do {
- if (nouveau_ramht_entry_valid(dev, ramht, co) &&
- nouveau_ramht_entry_same_channel(chan, ramht, co) &&
- (handle == nv_ro32(ramht, co))) {
- NV_DEBUG(dev,
- "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, handle, nv_ro32(ramht, co + 4));
- nv_wo32(ramht, co + 0, 0x00000000);
- nv_wo32(ramht, co + 4, 0x00000000);
- nvimem_flush(dev);
- goto out;
- }
-
- co += 8;
- if (co >= ramht->size)
- co = 0;
- } while (co != ho);
-
- NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
- chan->id, handle);
-out:
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
-}
-
-int
-nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+void
+nouveau_ramht_remove(struct nouveau_ramht *ramht, int cookie)
{
- struct nouveau_ramht_entry *entry;
-
- entry = nouveau_ramht_remove_entry(chan, handle);
- if (!entry)
- return -ENOENT;
-
- nouveau_ramht_remove_hash(chan, entry->handle);
- nouveau_gpuobj_ref(NULL, &entry->gpuobj);
- kfree(entry);
- return 0;
+ struct nouveau_bar *bar = nouveau_bar(ramht);
+ nv_wo32(ramht, cookie + 0, 0x00000000);
+ nv_wo32(ramht, cookie + 4, 0x00000000);
+ if (bar)
+ bar->flush(bar);
}
-struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
-{
- struct nouveau_ramht *ramht = chan->ramht;
- struct nouveau_ramht_entry *entry;
- struct nouveau_gpuobj *gpuobj = NULL;
- unsigned long flags;
-
- if (unlikely(!chan->ramht))
- return NULL;
-
- spin_lock_irqsave(&ramht->lock, flags);
- list_for_each_entry(entry, &chan->ramht->entries, head) {
- if (entry->channel == chan && entry->handle == handle) {
- gpuobj = entry->gpuobj;
- break;
- }
- }
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return gpuobj;
-}
+static struct nouveau_oclass
+nouveau_ramht_oclass = {
+ .handle = 0x0000abcd,
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = NULL,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+ },
+};
int
-nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- struct nouveau_ramht **pramht)
+nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
+ u32 size, u32 align, struct nouveau_ramht **pramht)
{
struct nouveau_ramht *ramht;
+ int ret;
- ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
- if (!ramht)
- return -ENOMEM;
-
- ramht->dev = dev;
- kref_init(&ramht->refcount);
- ramht->bits = drm_order(gpuobj->size / 8);
- INIT_LIST_HEAD(&ramht->entries);
- spin_lock_init(&ramht->lock);
- nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
-
+ ret = nouveau_gpuobj_create(parent, parent->engine ?
+ parent->engine : parent, /* <nv50 ramht */
+ &nouveau_ramht_oclass, 0, pargpu, size,
+ align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
*pramht = ramht;
- return 0;
-}
+ if (ret)
+ return ret;
-static void
-nouveau_ramht_del(struct kref *ref)
-{
- struct nouveau_ramht *ramht =
- container_of(ref, struct nouveau_ramht, refcount);
-
- nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
- kfree(ramht);
-}
-
-void
-nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
- struct nouveau_channel *chan)
-{
- struct nouveau_ramht_entry *entry;
- struct nouveau_ramht *ramht;
-
- if (ref)
- kref_get(&ref->refcount);
-
- ramht = *ptr;
- if (ramht) {
- while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
- nouveau_ramht_remove_hash(chan, entry->handle);
- nouveau_gpuobj_ref(NULL, &entry->gpuobj);
- kfree(entry);
- }
-
- kref_put(&ramht->refcount, nouveau_ramht_del);
- }
- *ptr = ref;
+ ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 4b809319e831..66f7dfd907ee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,61 +22,154 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_bsp.c...
- */
+#include <engine/bsp.h>
+
+struct nv84_bsp_priv {
+ struct nouveau_bsp base;
+};
-struct nv84_bsp_engine {
- struct nouveau_exec_engine base;
+struct nv84_bsp_chan {
+ struct nouveau_bsp_chan base;
};
+/*******************************************************************************
+ * BSP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_bsp_sclass[] = {
+ {},
+};
+
+/*******************************************************************************
+ * BSP context
+ ******************************************************************************/
+
static int
-nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_bsp_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00008000))
- return 0;
+ struct nv84_bsp_chan *priv;
+ int ret;
+
+ ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
return 0;
}
+static void
+nv84_bsp_context_dtor(struct nouveau_object *object)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ nouveau_bsp_context_destroy(&priv->base);
+}
+
static int
-nv84_bsp_init(struct drm_device *dev, int engine)
+nv84_bsp_context_init(struct nouveau_object *object)
{
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+ struct nv84_bsp_chan *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_bsp_context_init(&priv->base);
+ if (ret)
+ return ret;
+
return 0;
}
+static int
+nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_chan *priv = (void *)object;
+ return nouveau_bsp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_bsp_cclass = {
+ .handle = NV_ENGCTX(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_bsp_context_ctor,
+ .dtor = nv84_bsp_context_dtor,
+ .init = nv84_bsp_context_init,
+ .fini = nv84_bsp_context_fini,
+ .rd32 = _nouveau_bsp_context_rd32,
+ .wr32 = _nouveau_bsp_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * BSP engine/subdev functions
+ ******************************************************************************/
+
static void
-nv84_bsp_destroy(struct drm_device *dev, int engine)
+nv84_bsp_intr(struct nouveau_subdev *subdev)
{
- struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+}
- NVOBJ_ENGINE_DEL(dev, BSP);
+static int
+nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_bsp_priv *priv;
+ int ret;
+
+ ret = nouveau_bsp_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- kfree(pbsp);
+ nv_subdev(priv)->unit = 0x04008000;
+ nv_subdev(priv)->intr = nv84_bsp_intr;
+ nv_engine(priv)->cclass = &nv84_bsp_cclass;
+ nv_engine(priv)->sclass = nv84_bsp_sclass;
+ return 0;
}
-int
-nv84_bsp_create(struct drm_device *dev)
+static void
+nv84_bsp_dtor(struct nouveau_object *object)
{
- struct nv84_bsp_engine *pbsp;
+ struct nv84_bsp_priv *priv = (void *)object;
+ nouveau_bsp_destroy(&priv->base);
+}
- pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
- if (!pbsp)
- return -ENOMEM;
+static int
+nv84_bsp_init(struct nouveau_object *object)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ int ret;
- pbsp->base.destroy = nv84_bsp_destroy;
- pbsp->base.init = nv84_bsp_init;
- pbsp->base.fini = nv84_bsp_fini;
+ ret = nouveau_bsp_init(&priv->base);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
return 0;
}
+
+static int
+nv84_bsp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_bsp_priv *priv = (void *)object;
+ return nouveau_bsp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_bsp_oclass = {
+ .handle = NV_ENGINE(BSP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_bsp_ctor,
+ .dtor = nv84_bsp_dtor,
+ .init = nv84_bsp_init,
+ .fini = nv84_bsp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 9150c5ed16c3..debb82830b66 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,112 +22,75 @@
* Authors: Ben Skeggs
*/
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
-#include "fuc/nva3.fuc.h"
-
-struct nva3_copy_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nva3_copy_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx = NULL;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
+#include <subdev/fb.h>
+#include <subdev/vm.h>
- nv_wo32(ramin, 0xc0, 0x00190000);
- nv_wo32(ramin, 0xc4, ctx->addr + ctx->size - 1);
- nv_wo32(ramin, 0xc8, ctx->addr);
- nv_wo32(ramin, 0xcc, 0x00000000);
- nv_wo32(ramin, 0xd0, 0x00000000);
- nv_wo32(ramin, 0xd4, 0x00000000);
- nvimem_flush(dev);
+#include <engine/copy.h>
- nvvm_engref(chan->vm, engine, 1);
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static int
-nva3_copy_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
+#include "fuc/nva3.fuc.h"
- /* fuc engine doesn't need an object, our ramht code does.. */
- ctx->engine = 3;
- ctx->class = class;
- return nouveau_ramht_insert(chan, handle, ctx);
-}
+struct nva3_copy_priv {
+ struct nouveau_copy base;
+};
-static void
-nva3_copy_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- int i;
+struct nva3_copy_chan {
+ struct nouveau_copy_chan base;
+};
- for (i = 0xc0; i <= 0xd4; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
- nvvm_engref(chan->vm, engine, -1);
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = ctx;
-}
+static struct nouveau_oclass
+nva3_copy_sclass[] = {
+ { 0x85b5, &nouveau_object_ofuncs },
+ {}
+};
-static void
-nva3_copy_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0d);
-}
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
static int
-nva3_copy_init(struct drm_device *dev, int engine)
+nva3_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- int i;
-
- nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
- nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
-
- /* upload ucode */
- nv_wr32(dev, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
+ struct nva3_copy_chan *priv;
+ int ret;
- nv_wr32(dev, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x104188, i >> 6);
- nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
- }
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- /* start it running */
- nv_wr32(dev, 0x10410c, 0x00000000);
- nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
return 0;
}
-static int
-nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(dev, 0x104014, 0xffffffff);
- return 0;
-}
+static struct nouveau_oclass
+nva3_copy_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
+
+ },
+};
+
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
static struct nouveau_enum nva3_copy_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
@@ -137,65 +100,114 @@ static struct nouveau_enum nva3_copy_isr_error_name[] = {
};
static void
-nva3_copy_isr(struct drm_device *dev)
+nva3_copy_intr(struct nouveau_subdev *subdev)
{
- u32 dispatch = nv_rd32(dev, 0x10401c);
- u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
- u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, 0x104040) >> 16;
+ struct nva3_copy_priv *priv = (void *)subdev;
+ u32 dispatch = nv_rd32(priv, 0x10401c);
+ u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16);
+ u32 inst = nv_rd32(priv, 0x104050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, 0x104044);
- int chid = nv50_graph_isr_chid(dev, inst);
+ u32 data = nv_rd32(priv, 0x104044);
if (stat & 0x00000040) {
- NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
+ nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nva3_copy_isr_error_name, ssta);
- printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, 0x104004, 0x00000040);
+ printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
+ inst, subc, mthd, data);
+ nv_wr32(priv, 0x104004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, 0x104004, stat);
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004, stat);
}
- nv50_fb_vm_trap(dev, 1);
+
+ nv50_fb_trap(nouveau_fb(priv), 1);
}
-static void
-nva3_copy_destroy(struct drm_device *dev, int engine)
+static int
+nva3_copy_tlb_flush(struct nouveau_engine *engine)
{
- struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
+ nv50_vm_flush_engine(&engine->base, 0x0d);
+ return 0;
+}
- nouveau_irq_unregister(dev, 22);
+static int
+nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ bool enable = (nv_device(parent)->chipset != 0xaf);
+ struct nva3_copy_priv *priv;
+ int ret;
- NVOBJ_ENGINE_DEL(dev, COPY0);
- kfree(pcopy);
+ ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00802000;
+ nv_subdev(priv)->intr = nva3_copy_intr;
+ nv_engine(priv)->cclass = &nva3_copy_cclass;
+ nv_engine(priv)->sclass = nva3_copy_sclass;
+ nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
+ return 0;
}
-int
-nva3_copy_create(struct drm_device *dev)
+static int
+nva3_copy_init(struct nouveau_object *object)
{
- struct nva3_copy_engine *pcopy;
+ struct nva3_copy_priv *priv = (void *)object;
+ int ret, i;
- pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
- if (!pcopy)
- return -ENOMEM;
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
- pcopy->base.destroy = nva3_copy_destroy;
- pcopy->base.init = nva3_copy_init;
- pcopy->base.fini = nva3_copy_fini;
- pcopy->base.context_new = nva3_copy_context_new;
- pcopy->base.context_del = nva3_copy_context_del;
- pcopy->base.object_new = nva3_copy_object_new;
- pcopy->base.tlb_flush = nva3_copy_tlb_flush;
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014, 0xffffffff);
- nouveau_irq_register(dev, 22, nva3_copy_isr);
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188, i >> 6);
+ nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
+ }
- NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
- NVOBJ_CLASS(dev, 0x85b5, COPY0);
+ /* start it running */
+ nv_wr32(priv, 0x10410c, 0x00000000);
+ nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
return 0;
}
+
+static int
+nva3_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nva3_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014, 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nva3_copy_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xa3),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nva3_copy_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nva3_copy_init,
+ .fini = nva3_copy_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index f39de5a593d6..ecc8faac3a2a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,138 +22,86 @@
* Authors: Ben Skeggs
*/
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/copy.h>
+
#include "fuc/nvc0.fuc.h"
-struct nvc0_copy_engine {
- struct nouveau_exec_engine base;
- u32 irq;
- u32 pmc;
- u32 fuc;
- u32 ctx;
+struct nvc0_copy_priv {
+ struct nouveau_copy base;
};
struct nvc0_copy_chan {
- struct nouveau_gpuobj *mem;
- struct nouveau_vma vma;
+ struct nouveau_copy_chan base;
};
-static int
-nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
- struct nvc0_copy_chan *cctx;
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *ramin = chan->ramin;
- int ret;
-
- cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
- if (!cctx)
- return -ENOMEM;
+/*******************************************************************************
+ * Copy object classes
+ ******************************************************************************/
- ret = nouveau_gpuobj_new(dev, NULL, 256, 256,
- NVOBJ_FLAG_ZERO_ALLOC, &cctx->mem);
- if (ret)
- return ret;
+static struct nouveau_oclass
+nvc0_copy0_sclass[] = {
+ { 0x90b5, &nouveau_object_ofuncs },
+ {},
+};
- ret = nouveau_gpuobj_map_vm(cctx->mem, chan->vm, NV_MEM_ACCESS_RW,
- &cctx->vma);
- if (ret)
- return ret;
+static struct nouveau_oclass
+nvc0_copy1_sclass[] = {
+ { 0x90b8, &nouveau_object_ofuncs },
+ {},
+};
- nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(cctx->vma.offset));
- nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(cctx->vma.offset));
- nvimem_flush(dev);
- return 0;
-}
+/*******************************************************************************
+ * PCOPY context
+ ******************************************************************************/
static int
-nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- return 0;
-}
-
-static void
-nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
+nvc0_copy_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
- struct nvc0_copy_chan *cctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->addr >> 12);
- inst |= 0x40000000;
-
- /* disable fifo access */
- nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
- /* mark channel as unloaded if it's currently active */
- if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
- nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
- /* mark next channel as invalid if it's about to be loaded */
- if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
- nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
- /* restore fifo access */
- nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
-
- nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
- nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
-
- nouveau_gpuobj_unmap(&cctx->vma);
- nouveau_gpuobj_ref(NULL, &cctx->mem);
-
- kfree(cctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nvc0_copy_init(struct drm_device *dev, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
- nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
- nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-
- nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
+ struct nvc0_copy_chan *priv;
+ int ret;
- nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
- nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
- }
+ ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
- nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
- nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
- nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
return 0;
}
-static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+static struct nouveau_ofuncs
+nvc0_copy_context_ofuncs = {
+ .ctor = nvc0_copy_context_ctor,
+ .dtor = _nouveau_copy_context_dtor,
+ .init = _nouveau_copy_context_init,
+ .fini = _nouveau_copy_context_fini,
+ .rd32 = _nouveau_copy_context_rd32,
+ .wr32 = _nouveau_copy_context_wr32,
+};
- nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
+static struct nouveau_oclass
+nvc0_copy0_cclass = {
+ .handle = NV_ENGCTX(COPY0, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
- /* trigger fuc context unload */
- nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
- nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
- nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
- nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
+static struct nouveau_oclass
+nvc0_copy1_cclass = {
+ .handle = NV_ENGCTX(COPY1, 0xc0),
+ .ofuncs = &nvc0_copy_context_ofuncs,
+};
- nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
- return 0;
-}
+/*******************************************************************************
+ * PCOPY engine/subdev functions
+ ******************************************************************************/
static struct nouveau_enum nvc0_copy_isr_error_name[] = {
{ 0x0001, "ILLEGAL_MTHD" },
@@ -163,93 +111,145 @@ static struct nouveau_enum nvc0_copy_isr_error_name[] = {
};
static void
-nvc0_copy_isr(struct drm_device *dev, int engine)
+nvc0_copy_intr(struct nouveau_subdev *subdev)
{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
- u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
- u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
- u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
- u32 chid = nvc0_graph_isr_chid(dev, inst);
- u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
+ int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
+ u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
+ u32 stat = intr & disp & ~(disp >> 16);
+ u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
+ u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
+ u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
if (stat & 0x00000040) {
- NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
+ nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
+ printk("] ch 0x%010llx subc %d mthd 0x%04x data 0x%08x\n",
+ (u64)inst << 12, subc, mthd, data);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, pcopy->fuc + 0x004, stat);
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x104004 + (idx * 0x1000), stat);
}
}
-static void
-nvc0_copy_isr_0(struct drm_device *dev)
+static int
+nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
-}
+ struct nvc0_copy_priv *priv;
+ int ret;
-static void
-nvc0_copy_isr_1(struct drm_device *dev)
-{
- nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
+ if (nv_rd32(parent, 0x022500) & 0x00000100)
+ return -ENODEV;
+
+ ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000040;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy0_cclass;
+ nv_engine(priv)->sclass = nvc0_copy0_sclass;
+ return 0;
}
-static void
-nvc0_copy_destroy(struct drm_device *dev, int engine)
+static int
+nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+ struct nvc0_copy_priv *priv;
+ int ret;
- nouveau_irq_unregister(dev, pcopy->irq);
+ if (nv_rd32(parent, 0x022500) & 0x00000200)
+ return -ENODEV;
- if (engine == NVOBJ_ENGINE_COPY0)
- NVOBJ_ENGINE_DEL(dev, COPY0);
- else
- NVOBJ_ENGINE_DEL(dev, COPY1);
- kfree(pcopy);
+ ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000080;
+ nv_subdev(priv)->intr = nvc0_copy_intr;
+ nv_engine(priv)->cclass = &nvc0_copy1_cclass;
+ nv_engine(priv)->sclass = nvc0_copy1_sclass;
+ return 0;
}
-int
-nvc0_copy_create(struct drm_device *dev, int engine)
+static int
+nvc0_copy_init(struct nouveau_object *object)
{
- struct nvc0_copy_engine *pcopy;
-
- pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
- if (!pcopy)
- return -ENOMEM;
-
- pcopy->base.destroy = nvc0_copy_destroy;
- pcopy->base.init = nvc0_copy_init;
- pcopy->base.fini = nvc0_copy_fini;
- pcopy->base.context_new = nvc0_copy_context_new;
- pcopy->base.context_del = nvc0_copy_context_del;
- pcopy->base.object_new = nvc0_copy_object_new;
-
- if (engine == 0) {
- pcopy->irq = 5;
- pcopy->pmc = 0x00000040;
- pcopy->fuc = 0x104000;
- pcopy->ctx = 0x0230;
- nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
- NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
- NVOBJ_CLASS(dev, 0x90b5, COPY0);
- } else {
- pcopy->irq = 6;
- pcopy->pmc = 0x00000080;
- pcopy->fuc = 0x105000;
- pcopy->ctx = 0x0240;
- nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
- NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
- NVOBJ_CLASS(dev, 0x90b8, COPY1);
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_copy_init(&priv->base);
+ if (ret)
+ return ret;
+
+ /* disable all interrupts */
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ /* upload ucode */
+ nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+ nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
+
+ nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
+ nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
}
+ /* start it running */
+ nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
+ nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
+ nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
return 0;
}
+
+static int
+nvc0_copy_fini(struct nouveau_object *object, bool suspend)
+{
+ int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
+ struct nvc0_copy_priv *priv = (void *)object;
+
+ nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
+ nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
+
+ return nouveau_copy_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nvc0_copy0_oclass = {
+ .handle = NV_ENGINE(COPY0, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy0_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nvc0_copy_init,
+ .fini = nvc0_copy_fini,
+ },
+};
+
+struct nouveau_oclass
+nvc0_copy1_oclass = {
+ .handle = NV_ENGINE(COPY1, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_copy1_ctor,
+ .dtor = _nouveau_copy_dtor,
+ .init = nvc0_copy_init,
+ .fini = nvc0_copy_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 63051ab0ecca..a0e5e39638bc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,99 +22,106 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/gpuobj.h>
-struct nv84_crypt_engine {
- struct nouveau_exec_engine base;
+#include <subdev/fb.h>
+
+#include <engine/crypt.h>
+
+struct nv84_crypt_priv {
+ struct nouveau_crypt base;
};
+struct nv84_crypt_chan {
+ struct nouveau_crypt_chan base;
+};
+
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
+
static int
-nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
+nv84_crypt_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx;
+ struct nouveau_gpuobj *obj;
int ret;
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
if (ret)
return ret;
- nv_wo32(ramin, 0xa0, 0x00190000);
- nv_wo32(ramin, 0xa4, ctx->addr + ctx->size - 1);
- nv_wo32(ramin, 0xa8, ctx->addr);
- nv_wo32(ramin, 0xac, 0);
- nv_wo32(ramin, 0xb0, 0);
- nv_wo32(ramin, 0xb4, 0);
- nvimem_flush(dev);
-
- nvvm_engref(chan->vm, engine, 1);
- chan->engctx[engine] = ctx;
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
-static void
-nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->addr >> 12);
- inst |= 0x80000000;
-
- /* mark context as invalid if still on the hardware, not
- * doing this causes issues the next time PCRYPT is used,
- * unsurprisingly :)
- */
- nv_wr32(dev, 0x10200c, 0x00000000);
- if (nv_rd32(dev, 0x102188) == inst)
- nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
- if (nv_rd32(dev, 0x10218c) == inst)
- nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
- nv_wr32(dev, 0x10200c, 0x00000010);
-
- nouveau_gpuobj_ref(NULL, &ctx);
-
- nvvm_engref(chan->vm, engine, -1);
- chan->engctx[engine] = NULL;
-}
+static struct nouveau_ofuncs
+nv84_crypt_ofuncs = {
+ .ctor = nv84_crypt_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv84_crypt_sclass[] = {
+ { 0x74c1, &nv84_crypt_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
static int
-nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+nv84_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
+ struct nv84_crypt_chan *priv;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- obj->engine = 5;
- obj->class = class;
- nv_wo32(obj, 0x00, class);
- nvimem_flush(dev);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ return 0;
}
-static void
-nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0a);
-}
+static struct nouveau_oclass
+nv84_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
-static struct nouveau_bitfield nv84_crypt_intr[] = {
+static struct nouveau_bitfield nv84_crypt_intr_mask[] = {
{ 0x00000001, "INVALID_STATE" },
{ 0x00000002, "ILLEGAL_MTHD" },
{ 0x00000004, "ILLEGAL_CLASS" },
@@ -124,79 +131,78 @@ static struct nouveau_bitfield nv84_crypt_intr[] = {
};
static void
-nv84_crypt_isr(struct drm_device *dev)
+nv84_crypt_intr(struct nouveau_subdev *subdev)
{
- u32 stat = nv_rd32(dev, 0x102130);
- u32 mthd = nv_rd32(dev, 0x102190);
- u32 data = nv_rd32(dev, 0x102194);
- u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
- int show = nouveau_ratelimit();
- int chid = nv50_graph_isr_chid(dev, inst);
-
- if (show) {
- NV_INFO(dev, "PCRYPT:");
- nouveau_bitfield_print(nv84_crypt_intr, stat);
- printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
- chid, inst, mthd, data);
+ struct nv84_crypt_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x102130);
+ u32 mthd = nv_rd32(priv, 0x102190);
+ u32 data = nv_rd32(priv, 0x102194);
+ u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff;
+
+ if (stat) {
+ nv_error(priv, "");
+ nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
+ printk(" ch 0x%010llx mthd 0x%04x data 0x%08x\n",
+ (u64)inst << 12, mthd, data);
}
- nv_wr32(dev, 0x102130, stat);
- nv_wr32(dev, 0x10200c, 0x10);
+ nv_wr32(priv, 0x102130, stat);
+ nv_wr32(priv, 0x10200c, 0x10);
- nv50_fb_vm_trap(dev, show);
+ nv50_fb_trap(nouveau_fb(priv), 1);
}
static int
-nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_crypt_tlb_flush(struct nouveau_engine *engine)
{
- nv_wr32(dev, 0x102140, 0x00000000);
+ nv50_vm_flush_engine(&engine->base, 0x0a);
return 0;
}
static int
-nv84_crypt_init(struct drm_device *dev, int engine)
+nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+ struct nv84_crypt_priv *priv;
+ int ret;
- nv_wr32(dev, 0x102130, 0xffffffff);
- nv_wr32(dev, 0x102140, 0xffffffbf);
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_wr32(dev, 0x10200c, 0x00000010);
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv84_crypt_intr;
+ nv_engine(priv)->cclass = &nv84_crypt_cclass;
+ nv_engine(priv)->sclass = nv84_crypt_sclass;
+ nv_engine(priv)->tlb_flush = nv84_crypt_tlb_flush;
return 0;
}
-static void
-nv84_crypt_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- nouveau_irq_unregister(dev, 14);
- kfree(pcrypt);
-}
-
-int
-nv84_crypt_create(struct drm_device *dev)
+static int
+nv84_crypt_init(struct nouveau_object *object)
{
- struct nv84_crypt_engine *pcrypt;
-
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
- return -ENOMEM;
-
- pcrypt->base.destroy = nv84_crypt_destroy;
- pcrypt->base.init = nv84_crypt_init;
- pcrypt->base.fini = nv84_crypt_fini;
- pcrypt->base.context_new = nv84_crypt_context_new;
- pcrypt->base.context_del = nv84_crypt_context_del;
- pcrypt->base.object_new = nv84_crypt_object_new;
- pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
+ struct nv84_crypt_priv *priv = (void *)object;
+ int ret;
- nouveau_irq_register(dev, 14, nv84_crypt_isr);
+ ret = nouveau_crypt_init(&priv->base);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
- NVOBJ_CLASS (dev, 0x74c1, CRYPT);
+ nv_wr32(priv, 0x102130, 0xffffffff);
+ nv_wr32(priv, 0x102140, 0xffffffbf);
+ nv_wr32(priv, 0x10200c, 0x00000010);
return 0;
}
+
+struct nouveau_oclass
+nv84_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_crypt_ctor,
+ .dtor = _nouveau_crypt_dtor,
+ .init = nv84_crypt_init,
+ .fini = _nouveau_crypt_fini,
+ },
+};
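
The nv84 PCRYPT hunks above show the shape every ported engine now takes: a static ofuncs vtable (ctor/dtor/init/fini, optionally rd32/wr32) referenced from one or more oclass entries, keyed by the hardware class id (0x74c1 here). The standalone C sketch below models only that oclass/ofuncs lookup-and-construct pattern; the struct and function names are illustrative and are not the driver's core/object API.

/* Illustrative sketch of the oclass/ofuncs pattern used above; the names
 * here are hypothetical and do not come from the nouveau core headers. */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct obj;

struct ofuncs {
        int  (*ctor)(struct obj *);
        void (*dtor)(struct obj *);
        int  (*init)(struct obj *);
        int  (*fini)(struct obj *);
};

struct oclass {
        uint32_t handle;               /* hardware class, e.g. 0x74c1 */
        const struct ofuncs *ofuncs;
};

struct obj {
        const struct oclass *oclass;
};

static int  demo_ctor(struct obj *o) { (void)o; return 0; }
static void demo_dtor(struct obj *o) { (void)o; }
static int  demo_init(struct obj *o) { (void)o; return 0; }
static int  demo_fini(struct obj *o) { (void)o; return 0; }

static const struct ofuncs demo_ofuncs = {
        .ctor = demo_ctor, .dtor = demo_dtor,
        .init = demo_init, .fini = demo_fini,
};

/* class table, terminated by an empty entry like nv84_crypt_sclass[] */
static const struct oclass demo_sclass[] = {
        { 0x74c1, &demo_ofuncs },
        { 0 }
};

/* look up a class handle and construct an object through its ofuncs */
static int demo_object_new(uint32_t handle, struct obj *obj)
{
        const struct oclass *c;

        for (c = demo_sclass; c->ofuncs; c++) {
                if (c->handle == handle) {
                        obj->oclass = c;
                        return c->ofuncs->ctor(obj);
                }
        }
        return -1; /* unknown class */
}

int main(void)
{
        struct obj o;
        printf("new 0x74c1 -> %d\n", demo_object_new(0x74c1, &o));
        return 0;
}
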
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index c9adc1b8a7db..559a1b1d7082 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,124 +22,74 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
+#include <core/os.h>
+#include <core/enum.h>
+#include <core/class.h>
+#include <core/engctx.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/crypt.h>
#include "fuc/nv98.fuc.h"
struct nv98_crypt_priv {
- struct nouveau_exec_engine base;
+ struct nouveau_crypt base;
};
struct nv98_crypt_chan {
- struct nouveau_gpuobj *mem;
+ struct nouveau_crypt_chan base;
};
-static int
-nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct nv98_crypt_priv *priv = nv_engine(dev, engine);
- struct nv98_crypt_chan *cctx;
- int ret;
-
- cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
- if (!cctx)
- return -ENOMEM;
-
- nvvm_engref(chan->vm, engine, 1);
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
- if (ret)
- goto error;
-
- nv_wo32(chan->ramin, 0xa0, 0x00190000);
- nv_wo32(chan->ramin, 0xa4, cctx->mem->addr + cctx->mem->size - 1);
- nv_wo32(chan->ramin, 0xa8, cctx->mem->addr);
- nv_wo32(chan->ramin, 0xac, 0x00000000);
- nv_wo32(chan->ramin, 0xb0, 0x00000000);
- nv_wo32(chan->ramin, 0xb4, 0x00000000);
- nvimem_flush(dev);
-
-error:
- if (ret)
- priv->base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv98_crypt_chan *cctx = chan->engctx[engine];
- int i;
-
- for (i = 0xa0; i < 0xb4; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
+/*******************************************************************************
+ * Crypt object classes
+ ******************************************************************************/
- nouveau_gpuobj_ref(NULL, &cctx->mem);
+static struct nouveau_oclass
+nv98_crypt_sclass[] = {
+ { 0x88b4, &nouveau_object_ofuncs },
+ {},
+};
- nvvm_engref(chan->vm, engine, -1);
- chan->engctx[engine] = NULL;
- kfree(cctx);
-}
+/*******************************************************************************
+ * PCRYPT context
+ ******************************************************************************/
static int
-nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+nv98_crypt_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv98_crypt_chan *cctx = chan->engctx[engine];
-
- /* fuc engine doesn't need an object, our ramht code does.. */
- cctx->mem->engine = 5;
- cctx->mem->class = class;
- return nouveau_ramht_insert(chan, handle, cctx->mem);
-}
+ struct nv98_crypt_chan *priv;
+ int ret;
-static void
-nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0a);
-}
+ ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
+ 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
-static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
return 0;
}
-static int
-nv98_crypt_init(struct drm_device *dev, int engine)
-{
- int i;
-
- /* reset! */
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
- /* wait for exit interrupt to signal */
- nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(dev, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(dev, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(dev, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+static struct nouveau_oclass
+nv98_crypt_cclass = {
+ .handle = NV_ENGCTX(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_crypt_context_ctor,
+ .dtor = _nouveau_crypt_context_dtor,
+ .init = _nouveau_crypt_context_init,
+ .fini = _nouveau_crypt_context_fini,
+ .rd32 = _nouveau_crypt_context_rd32,
+ .wr32 = _nouveau_crypt_context_wr32,
+ },
+};
- /* start it running */
- nv_wr32(dev, 0x08710c, 0x00000000);
- nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
- return 0;
-}
+/*******************************************************************************
+ * PCRYPT engine/subdev functions
+ ******************************************************************************/
static struct nouveau_enum nv98_crypt_isr_error_name[] = {
{ 0x0000, "ILLEGAL_MTHD" },
@@ -150,65 +100,100 @@ static struct nouveau_enum nv98_crypt_isr_error_name[] = {
};
static void
-nv98_crypt_isr(struct drm_device *dev)
+nv98_crypt_intr(struct nouveau_subdev *subdev)
{
- u32 disp = nv_rd32(dev, 0x08701c);
- u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
- u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
- u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, 0x087040) >> 16;
+ struct nv98_crypt_priv *priv = (void *)subdev;
+ u32 disp = nv_rd32(priv, 0x08701c);
+ u32 stat = nv_rd32(priv, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(priv, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(priv, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(priv, 0x087040) >> 16;
u32 mthd = (addr & 0x07ff) << 2;
u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, 0x087044);
- int chid = nv50_graph_isr_chid(dev, inst);
+ u32 data = nv_rd32(priv, 0x087044);
if (stat & 0x00000040) {
- NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+ nv_error(priv, "DISPATCH_ERROR [");
nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
- printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, 0x087004, 0x00000040);
+ printk("] ch 0x%08x subc %d mthd 0x%04x data 0x%08x\n",
+ inst, subc, mthd, data);
+ nv_wr32(priv, 0x087004, 0x00000040);
stat &= ~0x00000040;
}
if (stat) {
- NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, 0x087004, stat);
+ nv_error(priv, "unhandled intr 0x%08x\n", stat);
+ nv_wr32(priv, 0x087004, stat);
}
- nv50_fb_vm_trap(dev, 1);
+ nv50_fb_trap(nouveau_fb(priv), 1);
}
-static void
-nv98_crypt_destroy(struct drm_device *dev, int engine)
+static int
+nv98_crypt_tlb_flush(struct nouveau_engine *engine)
{
- struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 14);
- NVOBJ_ENGINE_DEL(dev, CRYPT);
- kfree(priv);
+ nv50_vm_flush_engine(&engine->base, 0x0a);
+ return 0;
}
-int
-nv98_crypt_create(struct drm_device *dev)
+static int
+nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
struct nv98_crypt_priv *priv;
+ int ret;
+
+ ret = nouveau_crypt_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00004000;
+ nv_subdev(priv)->intr = nv98_crypt_intr;
+ nv_engine(priv)->cclass = &nv98_crypt_cclass;
+ nv_engine(priv)->sclass = nv98_crypt_sclass;
+ nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct nouveau_object *object)
+{
+ struct nv98_crypt_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_crypt_init(&priv->base);
+ if (ret)
+ return ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ /* wait for exit interrupt to signal */
+ nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(priv, 0x087004, 0x00000010);
- priv->base.destroy = nv98_crypt_destroy;
- priv->base.init = nv98_crypt_init;
- priv->base.fini = nv98_crypt_fini;
- priv->base.context_new = nv98_crypt_context_new;
- priv->base.context_del = nv98_crypt_context_del;
- priv->base.object_new = nv98_crypt_object_new;
- priv->base.tlb_flush = nv98_crypt_tlb_flush;
+ /* upload microcode code and data segments */
+ nv_wr32(priv, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
- nouveau_irq_register(dev, 14, nv98_crypt_isr);
+ nv_wr32(priv, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
- NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
- NVOBJ_CLASS(dev, 0x88b4, CRYPT);
+ /* start it running */
+ nv_wr32(priv, 0x08710c, 0x00000000);
+ nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
return 0;
}
+
+struct nouveau_oclass
+nv98_crypt_oclass = {
+ .handle = NV_ENGINE(CRYPT, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_crypt_ctor,
+ .dtor = _nouveau_crypt_dtor,
+ .init = nv98_crypt_init,
+ .fini = _nouveau_crypt_fini,
+ },
+};
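
With the port, the nv98 falcon microcode load moved into nv98_crypt_init(): wait for the exit interrupt, select the code segment (0x00100000) or data segment (0x00000000) through 0x087ff8, stream each word through 0x087ff4, then kick ENTRY and TRIGGER. The sketch below captures only that select-a-base, stream-through-a-data-port pattern; reg_wr() is a placeholder for an MMIO accessor, not a nouveau function.

/* Index/data-port upload pattern from nv98_crypt_init(); reg_wr() is a
 * stand-in for an MMIO write helper such as nv_wr32(). */
#include <stdint.h>
#include <stddef.h>

static void reg_wr(uint32_t reg, uint32_t val)
{
        (void)reg; (void)val;            /* stub: real code would hit MMIO */
}

static void falcon_upload(uint32_t base_reg, uint32_t data_reg,
                          uint32_t segment_base,
                          const uint32_t *words, size_t n)
{
        size_t i;

        reg_wr(base_reg, segment_base);  /* select code or data segment */
        for (i = 0; i < n; i++)
                reg_wr(data_reg, words[i]);
}

int main(void)
{
        static const uint32_t demo[] = { 0x01, 0x02, 0x03 };

        falcon_upload(0x087ff8, 0x087ff4, 0x00100000, demo, 3);
        falcon_upload(0x087ff8, 0x087ff4, 0x00000000, demo, 3);
        /* the driver then writes ENTRY (0x087104) and TRIGGER (0x087100) */
        return 0;
}
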
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
new file mode 100644
index 000000000000..1c919f2af89f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+struct nv04_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv04_disp_sclass[] = {
+ {},
+};
+
+static void
+nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+{
+ struct nouveau_disp *disp = &priv->base;
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv04_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv04_disp_priv *priv = (void *)subdev;
+ u32 crtc0 = nv_rd32(priv, 0x600100);
+ u32 crtc1 = nv_rd32(priv, 0x602100);
+
+ if (crtc0 & 0x00000001) {
+ nv04_disp_intr_vblank(priv, 0);
+ nv_wr32(priv, 0x600100, 0x00000001);
+ }
+
+ if (crtc1 & 0x00000001) {
+ nv04_disp_intr_vblank(priv, 1);
+ nv_wr32(priv, 0x602100, 0x00000001);
+ }
+}
+
+static int
+nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv04_disp_sclass;
+ nv_subdev(priv)->intr = nv04_disp_intr;
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
new file mode 100644
index 000000000000..16a9afb1060b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nv50_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nv50_disp_sclass[] = {
+ {},
+};
+
+static void
+nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
+{
+ struct nouveau_disp *disp = &priv->base;
+ struct nouveau_software_chan *chan, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+ if (chan->vblank.crtc != crtc)
+ continue;
+
+ nv_wr32(priv, 0x001704, chan->vblank.channel);
+ nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+
+ if (nv_device(priv)->chipset == 0x50) {
+ nv_wr32(priv, 0x001570, chan->vblank.offset);
+ nv_wr32(priv, 0x001574, chan->vblank.value);
+ } else {
+ if (nv_device(priv)->chipset >= 0xc0) {
+ nv_wr32(priv, 0x06000c,
+ upper_32_bits(chan->vblank.offset));
+ }
+ nv_wr32(priv, 0x060010, chan->vblank.offset);
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+ }
+
+ list_del(&chan->vblank.head);
+ if (disp->vblank.put)
+ disp->vblank.put(disp->vblank.data, crtc);
+ }
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nv50_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nv50_disp_priv *priv = (void *)subdev;
+ u32 stat1 = nv_rd32(priv, 0x610024);
+
+ if (stat1 & 0x00000004) {
+ nv50_disp_intr_vblank(priv, 0);
+ nv_wr32(priv, 0x610024, 0x00000004);
+ stat1 &= ~0x00000004;
+ }
+
+ if (stat1 & 0x00000008) {
+ nv50_disp_intr_vblank(priv, 1);
+ nv_wr32(priv, 0x610024, 0x00000008);
+ stat1 &= ~0x00000008;
+ }
+
+}
+
+static int
+nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nv50_disp_sclass;
+ nv_subdev(priv)->intr = nv50_disp_intr;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
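
nv50_disp_intr_vblank() above drains the list of software channels armed on a given CRTC: under the vblank lock it performs the semaphore release for each matching waiter, unlinks it, and drops the reference through vblank.put. A minimal standalone model of that drain loop follows, using a plain mutex and singly linked list in place of the kernel primitives; all names are hypothetical.

/* Illustrative model of the per-CRTC waiter drain in
 * nv50_disp_intr_vblank(); the types and release() hook are made up. */
#include <pthread.h>
#include <stdio.h>

struct waiter {
        int crtc;                         /* CRTC this waiter is armed on */
        struct waiter *next;
        void (*release)(struct waiter *); /* e.g. write the semaphore value */
};

struct vblank_state {
        pthread_mutex_t lock;             /* stands in for the vblank spinlock */
        struct waiter *head;
};

static void vblank_drain(struct vblank_state *st, int crtc)
{
        struct waiter **pw, *w;

        pthread_mutex_lock(&st->lock);
        pw = &st->head;
        while ((w = *pw) != NULL) {
                if (w->crtc != crtc) {    /* leave waiters for other CRTCs */
                        pw = &w->next;
                        continue;
                }
                *pw = w->next;            /* unlink, like list_del() */
                w->release(w);            /* perform the semaphore release */
        }
        pthread_mutex_unlock(&st->lock);
}

static void say_released(struct waiter *w)
{
        printf("released waiter on crtc %d\n", w->crtc);
}

int main(void)
{
        struct waiter w1 = { 0, NULL, say_released };
        struct waiter w0 = { 1, &w1, say_released };
        struct vblank_state st = { PTHREAD_MUTEX_INITIALIZER, &w0 };

        vblank_drain(&st, 0);             /* releases only the crtc-0 waiter */
        return 0;
}
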
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
new file mode 100644
index 000000000000..d93efbcf75b8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bar.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nvd0_disp_priv {
+ struct nouveau_disp base;
+};
+
+static struct nouveau_oclass
+nvd0_disp_sclass[] = {
+ {},
+};
+
+static void
+nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
+{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_disp *disp = &priv->base;
+ struct nouveau_software_chan *chan, *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
+ if (chan->vblank.crtc != crtc)
+ continue;
+
+ nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
+ bar->flush(bar);
+ nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
+ nv_wr32(priv, 0x060014, chan->vblank.value);
+
+ list_del(&chan->vblank.head);
+ if (disp->vblank.put)
+ disp->vblank.put(disp->vblank.data, crtc);
+ }
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+
+ if (disp->vblank.notify)
+ disp->vblank.notify(disp->vblank.data, crtc);
+}
+
+static void
+nvd0_disp_intr(struct nouveau_subdev *subdev)
+{
+ struct nvd0_disp_priv *priv = (void *)subdev;
+ u32 intr = nv_rd32(priv, 0x610088);
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 mask = 0x01000000 << i;
+ if (mask & intr) {
+ u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
+ if (stat & 0x00000001)
+ nvd0_disp_intr_vblank(priv, i);
+ nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
+ nv_rd32(priv, 0x6100c0 + (i * 0x800));
+ }
+ }
+}
+
+static int
+nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvd0_disp_priv *priv;
+ int ret;
+
+ ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+ "display", &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->sclass = nvd0_disp_sclass;
+ nv_subdev(priv)->intr = nvd0_disp_intr;
+
+ INIT_LIST_HEAD(&priv->base.vblank.list);
+ spin_lock_init(&priv->base.vblank.lock);
+ return 0;
+}
+
+struct nouveau_oclass
+nvd0_disp_oclass = {
+ .handle = NV_ENGINE(DISP, 0xd0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvd0_disp_ctor,
+ .dtor = _nouveau_disp_dtor,
+ .init = _nouveau_disp_init,
+ .fini = _nouveau_disp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
new file mode 100644
index 000000000000..e1f013d39768
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+int
+nouveau_dmaobj_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ void *data, u32 size, int len, void **pobject)
+{
+ struct nv_dma_class *args = data;
+ struct nouveau_dmaobj *object;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
+ object = *pobject;
+ if (ret)
+ return ret;
+
+ switch (args->flags & NV_DMA_TARGET_MASK) {
+ case NV_DMA_TARGET_VM:
+ object->target = NV_MEM_TARGET_VM;
+ break;
+ case NV_DMA_TARGET_VRAM:
+ object->target = NV_MEM_TARGET_VRAM;
+ break;
+ case NV_DMA_TARGET_PCI:
+ object->target = NV_MEM_TARGET_PCI;
+ break;
+ case NV_DMA_TARGET_PCI_US:
+ case NV_DMA_TARGET_AGP:
+ object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (args->flags & NV_DMA_ACCESS_MASK) {
+ case NV_DMA_ACCESS_VM:
+ object->access = NV_MEM_ACCESS_VM;
+ break;
+ case NV_DMA_ACCESS_RD:
+ object->access = NV_MEM_ACCESS_RO;
+ break;
+ case NV_DMA_ACCESS_WR:
+ object->access = NV_MEM_ACCESS_WO;
+ break;
+ case NV_DMA_ACCESS_RDWR:
+ object->access = NV_MEM_ACCESS_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ object->start = args->start;
+ object->limit = args->limit;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
new file mode 100644
index 000000000000..b0d3651fcaba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm/nv04.h>
+
+#include <engine/dmaobj.h>
+
+struct nv04_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nv04_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ struct nouveau_gpuobj *gpuobj;
+ u32 flags0 = nv_mclass(dmaobj);
+ u32 flags2 = 0x00000000;
+ u32 offset = (dmaobj->start & 0xfffff000);
+ u32 adjust = (dmaobj->start & 0x00000fff);
+ u32 length = dmaobj->limit - dmaobj->start;
+ int ret;
+
+ if (dmaobj->target == NV_MEM_TARGET_VM) {
+ gpuobj = nv04_vmmgr(dmaeng)->vm->pgt[0].obj[0];
+ if (dmaobj->start == 0)
+ return nouveau_gpuobj_dup(parent, gpuobj, pgpuobj);
+
+ offset = nv_ro32(gpuobj, 8 + (offset >> 10));
+ offset &= 0xfffff000;
+ dmaobj->target = NV_MEM_TARGET_PCI;
+ dmaobj->access = NV_MEM_ACCESS_RW;
+ }
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VRAM:
+ flags0 |= 0x00003000;
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags0 |= 0x00023000;
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags0 |= 0x00033000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_RO:
+ flags0 |= 0x00004000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ flags0 |= 0x00008000;
+ case NV_MEM_ACCESS_RW:
+ flags2 |= 0x00000002;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 16, 16, 0, &gpuobj);
+ *pgpuobj = gpuobj;
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags0 | (adjust << 20));
+ nv_wo32(*pgpuobj, 0x04, length);
+ nv_wo32(*pgpuobj, 0x08, flags2 | offset);
+ nv_wo32(*pgpuobj, 0x0c, flags2 | offset);
+ }
+
+ return ret;
+}
+
+static int
+nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv04_dmaobj_priv *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass,
+ data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ switch (nv_mclass(parent)) {
+ case 0x006e:
+ ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct nouveau_ofuncs
+nv04_dmaobj_ofuncs = {
+ .ctor = nv04_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv04_dmaobj_sclass[] = {
+ { 0x0002, &nv04_dmaobj_ofuncs },
+ { 0x0003, &nv04_dmaobj_ofuncs },
+ { 0x003d, &nv04_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nv04_dmaobj_sclass;
+ priv->base.bind = nv04_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
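
nv04_dmaobj_bind() above flattens a DMA object into four 32-bit words: word 0x00 holds the class, the target/access flag bits, and the sub-page offset ("adjust") shifted into the top bits; word 0x04 is the length; words 0x08 and 0x0c both carry the page-aligned base OR'd with flags2. The following sketch redoes that packing as plain arithmetic so the layout is easy to check; it is not the driver function and touches no hardware.

/* Recomputes the four RAMIN words written by nv04_dmaobj_bind() for a
 * given class/start/limit; pure arithmetic, no hardware access. */
#include <stdint.h>
#include <stdio.h>

static void nv04_dma_words(uint32_t oclass, uint32_t flags0_bits,
                           uint32_t flags2_bits, uint32_t start,
                           uint32_t limit, uint32_t w[4])
{
        uint32_t offset = start & 0xfffff000;   /* page-aligned base */
        uint32_t adjust = start & 0x00000fff;   /* offset within the page */
        uint32_t length = limit - start;

        w[0] = oclass | flags0_bits | (adjust << 20);
        w[1] = length;
        w[2] = flags2_bits | offset;
        w[3] = flags2_bits | offset;
}

int main(void)
{
        uint32_t w[4];

        /* 0x003d object, VRAM target (0x00003000), RW access (flags2 bit 1) */
        nv04_dma_words(0x003d, 0x00003000, 0x00000002,
                       0x00001080, 0x00101080, w);
        printf("%08x %08x %08x %08x\n", w[0], w[1], w[2], w[3]);
        return 0;
}
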
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
new file mode 100644
index 000000000000..8207ac9a0bb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nv50_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nv50_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+ struct nouveau_object *parent,
+ struct nouveau_dmaobj *dmaobj,
+ struct nouveau_gpuobj **pgpuobj)
+{
+ u32 flags = nv_mclass(dmaobj);
+ int ret;
+
+ switch (dmaobj->target) {
+ case NV_MEM_TARGET_VM:
+ flags |= 0x00000000;
+ flags |= 0x60000000; /* COMPRESSION_USEVM */
+ flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+ break;
+ case NV_MEM_TARGET_VRAM:
+ flags |= 0x00010000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ case NV_MEM_TARGET_PCI:
+ flags |= 0x00020000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ case NV_MEM_TARGET_PCI_NOSNOOP:
+ flags |= 0x00030000;
+ flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dmaobj->access) {
+ case NV_MEM_ACCESS_VM:
+ break;
+ case NV_MEM_ACCESS_RO:
+ flags |= 0x00040000;
+ break;
+ case NV_MEM_ACCESS_WO:
+ case NV_MEM_ACCESS_RW:
+ flags |= 0x00080000;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+ if (ret == 0) {
+ nv_wo32(*pgpuobj, 0x00, flags);
+ nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+ nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
+ upper_32_bits(dmaobj->start));
+ nv_wo32(*pgpuobj, 0x10, 0x00000000);
+ nv_wo32(*pgpuobj, 0x14, 0x00000000);
+ }
+
+ return ret;
+}
+
+static int
+nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_dmaeng *dmaeng = (void *)engine;
+ struct nv50_dmaobj_priv *dmaobj;
+ struct nouveau_gpuobj *gpuobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass,
+ data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ switch (nv_mclass(parent)) {
+ case 0x506f:
+ case 0x826f:
+ ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
+ nouveau_object_ref(NULL, pobject);
+ *pobject = nv_object(gpuobj);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct nouveau_ofuncs
+nv50_dmaobj_ofuncs = {
+ .ctor = nv50_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nv50_dmaobj_sclass[] = {
+ { 0x0002, &nv50_dmaobj_ofuncs },
+ { 0x0003, &nv50_dmaobj_ofuncs },
+ { 0x003d, &nv50_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nv50_dmaobj_sclass;
+ priv->base.bind = nv50_dmaobj_bind;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
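
The nv50 variant handles addresses wider than 32 bits, so start and limit are split: their low words go into offsets 0x08 and 0x04, and the high bits of both are packed together into offset 0x0c as upper_32_bits(limit) << 24 | upper_32_bits(start). A short arithmetic sketch, assuming nothing beyond what the hunk shows:

/* Packs a 64-bit start/limit pair the way nv50_dmaobj_bind() fills
 * words 0x04..0x0c; illustration only. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        uint64_t start = 0x0000000120000000ULL;
        uint64_t limit = 0x000000013fffffffULL;

        uint32_t w04 = lower_32(limit);
        uint32_t w08 = lower_32(start);
        uint32_t w0c = (upper_32(limit) << 24) | upper_32(start);

        printf("0x04=%08x 0x08=%08x 0x0c=%08x\n", w04, w08, w0c);
        return 0;
}
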
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
new file mode 100644
index 000000000000..5baa08695535
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/gpuobj.h>
+
+#include <subdev/fb.h>
+#include <engine/dmaobj.h>
+
+struct nvc0_dmaeng_priv {
+ struct nouveau_dmaeng base;
+};
+
+struct nvc0_dmaobj_priv {
+ struct nouveau_dmaobj base;
+};
+
+static int
+nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_dmaobj_priv *dmaobj;
+ int ret;
+
+ ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
+ *pobject = nv_object(dmaobj);
+ if (ret)
+ return ret;
+
+ if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nvc0_dmaobj_ofuncs = {
+ .ctor = nvc0_dmaobj_ctor,
+ .dtor = _nouveau_dmaobj_dtor,
+ .init = _nouveau_dmaobj_init,
+ .fini = _nouveau_dmaobj_fini,
+};
+
+static struct nouveau_oclass
+nvc0_dmaobj_sclass[] = {
+ { 0x0002, &nvc0_dmaobj_ofuncs },
+ { 0x0003, &nvc0_dmaobj_ofuncs },
+ { 0x003d, &nvc0_dmaobj_ofuncs },
+ {}
+};
+
+static int
+nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_dmaeng_priv *priv;
+ int ret;
+
+ ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ priv->base.base.sclass = nvc0_dmaobj_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_dmaeng_oclass = {
+ .handle = NV_ENGINE(DMAOBJ, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_dmaeng_ctor,
+ .dtor = _nouveau_dmaeng_dtor,
+ .init = _nouveau_dmaeng_init,
+ .fini = _nouveau_dmaeng_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
new file mode 100644
index 000000000000..edeb76ee648c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/handle.h>
+
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+int
+nouveau_fifo_channel_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int bar, u32 addr, u32 size, u32 pushbuf,
+ u32 engmask, int len, void **ptr)
+{
+ struct nouveau_device *device = nv_device(engine);
+ struct nouveau_fifo *priv = (void *)engine;
+ struct nouveau_fifo_chan *chan;
+ struct nouveau_dmaeng *dmaeng;
+ unsigned long flags;
+ int ret;
+
+ /* create base object class */
+ ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
+ engmask, len, ptr);
+ chan = *ptr;
+ if (ret)
+ return ret;
+
+ /* validate dma object representing push buffer */
+ chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
+ if (!chan->pushdma)
+ return -ENOENT;
+
+ dmaeng = (void *)chan->pushdma->base.engine;
+ switch (chan->pushdma->base.oclass->handle) {
+ case 0x0002:
+ case 0x003d:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dmaeng->bind) {
+ ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+ if (ret)
+ return ret;
+ }
+
+ /* find a free fifo channel */
+ spin_lock_irqsave(&priv->lock, flags);
+ for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
+ if (!priv->channel[chan->chid]) {
+ priv->channel[chan->chid] = nv_object(chan);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (chan->chid == priv->max) {
+ nv_error(priv, "no free channels\n");
+ return -ENOSPC;
+ }
+
+ /* map fifo control registers */
+ chan->user = ioremap(pci_resource_start(device->pdev, bar) + addr +
+ (chan->chid * size), size);
+ if (!chan->user)
+ return -EFAULT;
+
+ chan->size = size;
+ return 0;
+}
+
+void
+nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
+{
+ struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
+ unsigned long flags;
+
+ iounmap(chan->user);
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->channel[chan->chid] = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ nouveau_gpuobj_ref(NULL, &chan->pushgpu);
+ nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
+ nouveau_namedb_destroy(&chan->base);
+}
+
+void
+_nouveau_fifo_channel_dtor(struct nouveau_object *object)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ nouveau_fifo_channel_destroy(chan);
+}
+
+u32
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ return ioread32_native(chan->user + addr);
+}
+
+void
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+ struct nouveau_fifo_chan *chan = (void *)object;
+ iowrite32_native(data, chan->user + addr);
+}
+
+void
+nouveau_fifo_destroy(struct nouveau_fifo *priv)
+{
+ kfree(priv->channel);
+ nouveau_engine_destroy(&priv->base);
+}
+
+int
+nouveau_fifo_create_(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass,
+ int min, int max, int length, void **pobject)
+{
+ struct nouveau_fifo *priv;
+ int ret;
+
+ ret = nouveau_engine_create_(parent, engine, oclass, true, "PFIFO",
+ "fifo", length, pobject);
+ priv = *pobject;
+ if (ret)
+ return ret;
+
+ priv->min = min;
+ priv->max = max;
+ priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL);
+ if (!priv->channel)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->lock);
+ return 0;
+}
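
nouveau_fifo_channel_create_() above claims a channel id by scanning priv->channel[min..max] under the fifo lock for the first NULL slot, and only afterwards ioremap()s that channel's slice of the control window (bar base + addr + chid * size). The sketch below models just the slot-claim step; the mutex stands in for the spinlock and the types are illustrative.

/* Models the first-free-slot channel id allocation used by
 * nouveau_fifo_channel_create_(); not the driver code itself. */
#include <pthread.h>
#include <stddef.h>

#define MAX_CHAN 128

struct fifo {
        pthread_mutex_t lock;          /* stands in for priv->lock */
        void *channel[MAX_CHAN];       /* NULL means the slot is free */
        int min, max;
};

/* returns the claimed channel id, or -1 if every slot is in use */
static int fifo_claim_chid(struct fifo *fifo, void *chan)
{
        int chid;

        pthread_mutex_lock(&fifo->lock);
        for (chid = fifo->min; chid < fifo->max; chid++) {
                if (!fifo->channel[chid]) {
                        fifo->channel[chid] = chan;  /* claim the slot */
                        break;
                }
        }
        pthread_mutex_unlock(&fifo->lock);

        return chid < fifo->max ? chid : -1;
}

int main(void)
{
        static struct fifo fifo = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .min = 0, .max = MAX_CHAN,
        };
        int dummy;

        return fifo_claim_chid(&fifo, &dummy) < 0;   /* 0 on success */
}
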
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 6ab7eb0dd9bb..8b7513f4dc8f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -1,44 +1,45 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
#include <core/ramht.h>
-#include "nouveau_software.h"
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv04_ramfc[] = {
+
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv04_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
@@ -50,283 +51,360 @@ static struct ramfc_desc {
{}
};
-struct nv04_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
-struct nv04_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
-
-bool
-nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+int
+nv04_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
{
- int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
-
- if (!enable) {
- /* In some cases the PFIFO puller may be left in an
- * inconsistent state if you try to stop it when it's
- * busy translating handles. Sometimes you get a
- * PFIFO_CACHE_ERROR, sometimes it just fails silently
- * sending incorrect instance offsets to PGRAPH after
- * it's started up again. To avoid the latter we
- * invalidate the most recently calculated instance.
- */
- if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
- NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
-
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ u32 context, chid = chan->base.chid;
+ int ret;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->addr >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW:
+ context |= 0x00000000;
+ break;
+ case NVDEV_ENGINE_GR:
+ context |= 0x00010000;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ context |= 0x00020000;
+ break;
+ default:
+ return -EINVAL;
}
- return pull & 1;
+ context |= 0x80000000; /* valid */
+ context |= chid << 24;
+
+ mutex_lock(&nv_subdev(priv)->mutex);
+ ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
+
+void
+nv04_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ mutex_lock(&nv_subdev(priv)->mutex);
+ nouveau_ramht_remove(priv->ramht, cookie);
+ mutex_unlock(&nv_subdev(priv)->mutex);
}
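
nv04_fifo_object_attach() above assembles the 32-bit RAMHT context word for an object: the instance address >> 4 (or the token 0x00000004 for software objects) in the low bits, an engine selector at bits 16-17 (0x00010000 for GR, 0x00020000 for MPEG), the owning channel id at bit 24, and bit 31 marking the entry valid. The sketch below recomputes that word; the enum and helper are illustrative, not driver API.

/* Recomputes the RAMHT context word assembled by nv04_fifo_object_attach();
 * the engine enum is a stand-in for the driver's NVDEV_ENGINE_* ids, and
 * inst_addr == 0 is used here to mean "software object". */
#include <stdint.h>
#include <stdio.h>

enum demo_engine { ENG_SW, ENG_GR, ENG_MPEG };

static uint32_t ramht_context(uint32_t inst_addr, enum demo_engine eng,
                              uint32_t chid)
{
        uint32_t context = inst_addr ? (inst_addr >> 4)
                                     : 0x00000004;   /* "just non-zero" */

        switch (eng) {
        case ENG_SW:   context |= 0x00000000; break;
        case ENG_GR:   context |= 0x00010000; break;
        case ENG_MPEG: context |= 0x00020000; break;
        }

        context |= 0x80000000;   /* valid */
        context |= chid << 24;   /* owning channel */
        return context;
}

int main(void)
{
        /* GR object at instance 0x2000 on channel 3 -> 0x83010200 */
        printf("0x%08x\n", ramht_context(0x2000, ENG_GR, 3));
        return 0;
}
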
static int
-nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv04_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nv04_fifo_chan *fctx;
- unsigned long flags;
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
int ret;
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
+ if (size < sizeof(*args))
+ return -EINVAL;
- fctx->ramfc = chan->id * 32;
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 32;
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x08, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x10,
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
+
+void
+nv04_fifo_chan_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct ramfc_desc *c = priv->ramfc_desc;
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ do {
+ nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+ } while ((++c)->bits);
+
+ nouveau_fifo_channel_destroy(&chan->base);
+}
-error:
+int
+nv04_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ u32 mask = 1 << chan->base.chid;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ return ret;
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, NV04_PFIFO_MODE, mask, mask);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
}
-void
-nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
+int
+nv04_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct ramfc_desc *c = priv->ramfc_desc;
+ struct nv04_fifo_priv *priv = (void *)object->engine;
+ struct nv04_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *fctx = priv->ramfc;
+ struct ramfc_desc *c;
unsigned long flags;
- int chid;
+ u32 data = chan->ramfc;
+ u32 chid;
/* prevent fifo context switches */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
/* if this channel is active, replace it with a null context */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- if (chid == chan->id) {
- nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ if (chid == chan->base.chid) {
+ nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+ c = priv->ramfc_desc;
do {
- u32 mask = ((1ULL << c->bits) - 1) << c->regs;
- nv_mask(dev, c->regp, mask, 0x00000000);
- nv_wo32(priv->ramfc, fctx->ramfc + c->ctxp, 0x00000000);
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
+ nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+
+ c = priv->ramfc_desc;
+ do {
+ nv_wr32(priv, c->regp, 0x00000000);
} while ((++c)->bits);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
/* restore normal operation, after disabling dma mode */
- nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
+ nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
}
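
The fini path above saves live PFIFO state back into RAMFC using the ramfc_desc table: for every descriptor it extracts a bits-wide field at bit regs of register regp and merges it into the RAMFC word at ctxp, shifted to bit ctxs, then clears the registers. Here is a table-driven model of that save loop with plain arrays standing in for the MMIO registers and RAMFC; the descriptor contents in main() are made up for the example.

/* Table-driven register save modeled on the ramfc_desc loop in
 * nv04_fifo_chan_fini(); regs[] and ramfc[] are plain arrays here and
 * regp/ctxp are treated as array indices rather than byte offsets. */
#include <stdint.h>

struct ramfc_desc_demo {
        unsigned bits;   /* width of the field */
        unsigned ctxs;   /* bit position inside the RAMFC word */
        unsigned ctxp;   /* word offset inside the channel's RAMFC */
        unsigned regs;   /* bit position inside the register */
        unsigned regp;   /* register index (an MMIO offset in the driver) */
};

static void ramfc_save(const struct ramfc_desc_demo *c,
                       uint32_t *regs, uint32_t *ramfc, uint32_t base)
{
        do {
                uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
                uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
                uint32_t rv = (regs[c->regp] & rm) >> c->regs;
                uint32_t cv = ramfc[base + c->ctxp] & ~cm;

                ramfc[base + c->ctxp] = cv | (rv << c->ctxs);
                regs[c->regp] = 0;               /* clear the live state */
        } while ((++c)->bits);
}

int main(void)
{
        static const struct ramfc_desc_demo desc[] = {
                { 32, 0, 0, 0, 0 },      /* 32-bit field -> RAMFC word 0 */
                { 16, 0, 1, 0, 1 },      /* 16-bit field -> RAMFC word 1 */
                { 0 }
        };
        uint32_t regs[2]  = { 0x12345678, 0xabcd };
        uint32_t ramfc[2] = { 0, 0 };

        ramfc_save(desc, regs, ramfc, 0);
        return ramfc[0] != 0x12345678;   /* 0 on success */
}
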
+static struct nouveau_ofuncs
+nv04_fifo_ofuncs = {
+ .ctor = nv04_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv04_fifo_sclass[] = {
+ { 0x006e, &nv04_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
int
-nv04_fifo_init(struct drm_device *dev, int engine)
+nv04_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- int i;
+ struct nv04_fifo_base *base;
+ int ret;
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+ return 0;
+}
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->addr >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
+static struct nouveau_oclass
+nv04_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+void
+nv04_fifo_pause(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__acquires(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags;
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+ spin_lock_irqsave(&priv->base.lock, flags);
+ *pflags = flags;
+
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000);
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);
+
+ /* in some cases the puller may be left in an inconsistent state
+ * if you try to stop it while it's busy translating handles.
+ * sometimes you get a CACHE_ERROR, sometimes it just fails
+ * silently, sending incorrect instance offsets to PGRAPH after
+ * it's started up again.
+ *
+ * to avoid this, we invalidate the most recently calculated
+ * instance.
+ */
+ if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000))
+ nv_warn(priv, "timeout idling puller\n");
+
+ if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000);
+}
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
+void
+nv04_fifo_start(struct nouveau_fifo *pfifo, unsigned long *pflags)
+__releases(priv->base.lock)
+{
+ struct nv04_fifo_priv *priv = (void *)pfifo;
+ unsigned long flags = *pflags;
- return 0;
+ nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001);
+
+ spin_unlock_irqrestore(&priv->base.lock, flags);
}
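
Together these form the pause/start pair used to quiesce PFIFO around register pokes. A minimal caller sketch, assuming a fifo pointer whose pause/start hooks were installed by the ctor later in this file; the wrapper function itself is hypothetical:

static void
example_poke_paused(struct nouveau_fifo *pfifo, u32 reg, u32 val)
{
	unsigned long flags;

	pfifo->pause(pfifo, &flags);	/* idle the puller, IRQ state saved in flags */
	nv_wr32(pfifo, reg, val);	/* safe: no context switches can occur */
	pfifo->start(pfifo, &flags);	/* re-enable pull and restore IRQ state */
}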
-int
-nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+static const char *
+nv_dma_state_err(u32 state)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nouveau_channel *chan;
- int chid;
-
- /* prevent context switches and halt fifo operation */
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
-
- /* store current fifo context in ramfc */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- chan = dev_priv->channels.ptr[chid];
- if (suspend && chid != priv->base.channels && chan) {
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct nouveau_gpuobj *ctx = priv->ramfc;
- struct ramfc_desc *c = priv->ramfc_desc;
- do {
- u32 rm = ((1ULL << c->bits) - 1) << c->regs;
- u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
- u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
- u32 cv = (nv_ro32(ctx, c->ctxp + fctx->ramfc) & ~cm);
- nv_wo32(ctx, c->ctxp + fctx->ramfc, cv | (rv << c->ctxs));
- } while ((++c)->bits);
- }
-
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
- return 0;
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
}
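
nv_dma_state_err() just indexes the top three bits of the DMA_STATE word. A worked example; the state value itself is made up:

/* illustrative decode of NV04_PFIFO_CACHE1_DMA_STATE (0x003228) */
u32 state = 0x80000000;				/* (state >> 29) & 7 == 4 */
const char *err = nv_dma_state_err(state);	/* -> "INVALID_CMD" */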
static bool
-nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- struct nouveau_gpuobj *obj;
- unsigned long flags;
+ struct nv04_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
+ unsigned long flags;
u32 engine;
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < pfifo->channels))
- chan = dev_priv->channels.ptr[chid];
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
if (unlikely(!chan))
goto out;
switch (mthd) {
- case 0x0000: /* bind object to subchannel */
- obj = nouveau_ramht_find(chan, data);
- if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+ case 0x0000:
+ bind = nouveau_namedb_get(nv_namedb(chan), data);
+ if (unlikely(!bind))
break;
- engine = 0x0000000f << (subc * 4);
+ if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
+ engine = 0x0000000f << (subc * 4);
+ chan->subc[subc] = data;
+ handled = true;
+
+ nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
+ }
- nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
- handled = true;
+ nouveau_namedb_put(bind);
break;
default:
- engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+ engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
- if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
- mthd, data))
- handled = true;
+ bind = nouveau_namedb_get(nv_namedb(chan), chan->subc[subc]);
+ if (likely(bind)) {
+ if (!nv_call(bind->object, mthd, data))
+ handled = true;
+ nouveau_namedb_put(bind);
+ }
break;
}
out:
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
return handled;
}
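
The addr value handed to nv04_fifo_swmthd() comes straight from the CACHE1 method registers, so subchannel and method fall out of the same shifts. A worked example with an invented address:

/* illustrative: decoding a CACHE1 method address */
u32 addr = 0x0000a000;
int subc = (addr >> 13) & 0x7;	/* 5 */
int mthd = addr & 0x1ffc;	/* 0x0000: the "bind object to subchannel" case */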
-static const char *nv_dma_state_err(u32 state)
-{
- static const char * const desc[] = {
- "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
- "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
- };
- return desc[(state >> 29) & 0x7];
-}
-
void
-nv04_fifo_isr(struct drm_device *dev)
+nv04_fifo_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_device *device = nv_device(subdev);
+ struct nv04_fifo_priv *priv = (void *)subdev;
uint32_t status, reassign;
int cnt = 0;
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
uint32_t chid, get;
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 0);
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+ chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max;
+ get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t mthd, data;
@@ -340,86 +418,85 @@ nv04_fifo_isr(struct drm_device *dev)
*/
ptr = (get & 0x7ff) >> 2;
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
+ if (device->card_type < NV_40) {
+ mthd = nv_rd32(priv,
NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
+ data = nv_rd32(priv,
NV04_PFIFO_CACHE1_DATA(ptr));
} else {
- mthd = nv_rd32(dev,
+ mthd = nv_rd32(priv,
NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
+ data = nv_rd32(priv,
NV40_PFIFO_CACHE1_DATA(ptr));
}
- if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
+ if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
+ nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
}
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
+ u32 dma_get = nv_rd32(priv, 0x003244);
+ u32 dma_put = nv_rd32(priv, 0x003240);
+ u32 push = nv_rd32(priv, 0x003220);
+ u32 state = nv_rd32(priv, 0x003228);
+
+ if (device->card_type == NV_50) {
+ u32 ho_get = nv_rd32(priv, 0x003328);
+ u32 ho_put = nv_rd32(priv, 0x003320);
+ u32 ib_get = nv_rd32(priv, 0x003334);
+ u32 ib_put = nv_rd32(priv, 0x003330);
+
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
+ push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
+ nv_wr32(priv, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
+ nv_wr32(priv, 0x003244, dma_put);
+ nv_wr32(priv, 0x003328, ho_put);
} else
if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
+ nv_wr32(priv, 0x003334, ib_put);
}
} else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(priv, 0x003244, dma_put);
}
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ nv_wr32(priv, 0x003228, 0x00000000);
+ nv_wr32(priv, 0x003220, 0x00000001);
+ nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
@@ -427,81 +504,118 @@ nv04_fifo_isr(struct drm_device *dev)
uint32_t sem;
status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
+ nv_wr32(priv, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_SEMAPHORE);
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+ sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
}
- if (dev_priv->card_type == NV_50) {
+ if (device->card_type == NV_50) {
if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, nouveau_ratelimit());
+ nv50_fb_trap(nouveau_fb(priv), 1);
status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
+ nv_wr32(priv, 0x002100, 0x00000010);
}
}
if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+ status, chid);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, status);
status = 0;
}
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ nv_wr32(priv, NV03_PFIFO_CACHES, reassign);
}
if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
+ nv_info(priv, "still angry after %d spins, halt\n", cnt);
+ nv_wr32(priv, 0x002140, 0);
+ nv_wr32(priv, 0x000140, 0);
}
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+ nv_wr32(priv, 0x000100, 0x00000100);
}
-void
-nv04_fifo_destroy(struct drm_device *dev, int engine)
+static int
+nv04_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
- nouveau_irq_unregister(dev, 8);
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 15, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv04_fifo_cclass;
+ nv_engine(priv)->sclass = nv04_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv04_ramfc;
+ return 0;
+}
+void
+nv04_fifo_dtor(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->ramfc);
nouveau_gpuobj_ref(NULL, &priv->ramro);
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
+ nouveau_ramht_ref(NULL, &priv->ramht);
+ nouveau_fifo_destroy(&priv->base);
}
int
-nv04_fifo_create(struct drm_device *dev)
+nv04_fifo_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv;
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8);
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv04_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 15;
- priv->ramfc_desc = nv04_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
+
+struct nouveau_oclass
+nv04_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
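
How this class is picked up is outside this hunk; as a hedged sketch (the device->oclass[] table shown here is an assumption about the new engine module layout, not part of this diff), per-chipset device code would reference it roughly as:

/* assumed registration site in the per-chipset device code */
device->oclass[NVDEV_ENGINE_FIFO] = &nv04_fifo_oclass;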
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
new file mode 100644
index 000000000000..496a4b4fdfaf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.h
@@ -0,0 +1,178 @@
+#ifndef __NV04_FIFO_H__
+#define __NV04_FIFO_H__
+
+#include <engine/fifo.h>
+
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
+# define NV_PFIFO_INTR_RUNOUT (1<<4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+# define NV04_PFIFO_CACHE1_PULL0_HASH_FAILED 0x00000010
+# define NV04_PFIFO_CACHE1_PULL0_HASH_BUSY 0x00001000
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
+struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo base;
+ struct ramfc_desc *ramfc_desc;
+ struct nouveau_ramht *ramht;
+ struct nouveau_gpuobj *ramro;
+ struct nouveau_gpuobj *ramfc;
+};
+
+struct nv04_fifo_base {
+ struct nouveau_fifo_base base;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 subc[8];
+ u32 ramfc;
+};
+
+int nv04_fifo_object_attach(struct nouveau_object *,
+ struct nouveau_object *, u32);
+void nv04_fifo_object_detach(struct nouveau_object *, int);
+
+void nv04_fifo_chan_dtor(struct nouveau_object *);
+int nv04_fifo_chan_init(struct nouveau_object *);
+int nv04_fifo_chan_fini(struct nouveau_object *, bool suspend);
+
+int nv04_fifo_context_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+
+void nv04_fifo_dtor(struct nouveau_object *);
+int nv04_fifo_init(struct nouveau_object *);
+void nv04_fifo_pause(struct nouveau_fifo *, unsigned long *);
+void nv04_fifo_start(struct nouveau_fifo *, unsigned long *);
+
+#endif
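
Each chipset's ramfc_desc[] table ends with a zeroed sentinel entry, and the save path in nv04_fifo_chan_fini() walks it generically. A sketch restating that loop in isolation; the wrapper function and parameter names are illustrative only:

static void
example_ramfc_save(struct nv04_fifo_priv *priv,
		   struct nouveau_gpuobj *ramfc, u32 base)
{
	struct ramfc_desc *c = priv->ramfc_desc;

	do {
		u32 rm = ((1ULL << c->bits) - 1) << c->regs;	/* register field mask */
		u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;	/* RAMFC field mask */
		u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs;
		u32 cv =  nv_ro32(ramfc, c->ctxp + base) & ~cm;
		nv_wo32(ramfc, c->ctxp + base, cv | (rv << c->ctxs));
	} while ((++c)->bits);	/* sentinel entry has bits == 0 */
}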
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 2d38fa88f9c7..391fefa7c472 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -1,43 +1,42 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
#include <core/ramht.h>
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv10_ramfc[] = {
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv10_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -50,87 +49,122 @@ static struct ramfc_desc {
{}
};
-struct nv10_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-struct nv10_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
static int
-nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv10_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv = nv_engine(dev, engine);
- struct nv10_fifo_chan *fctx;
- unsigned long flags;
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
int ret;
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->ramfc = chan->id * 32;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 32;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+static struct nouveau_ofuncs
+nv10_fifo_ofuncs = {
+ .ctor = nv10_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
+static struct nouveau_oclass
+nv10_fifo_sclass[] = {
+ { 0x006e, &nv10_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv10_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
-int
-nv10_fifo_create(struct drm_device *dev)
+static int
+nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv10_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv10_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv10_fifo_cclass;
+ nv_engine(priv)->sclass = nv10_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv10_ramfc;
return 0;
}
+
+struct nouveau_oclass
+nv10_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv04_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index 2f700a15e286..3b9d6c97f9ba 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -1,43 +1,42 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
#include <core/ramht.h>
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv17_ramfc[] = {
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv17_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -55,124 +54,154 @@ static struct ramfc_desc {
{}
};
-struct nv17_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
-
-struct nv17_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
static int
-nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv17_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- struct nv17_fifo_chan *fctx;
- unsigned long flags;
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
int ret;
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->ramfc = chan->id * 64;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x14,
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
+ 0x10000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+ &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->object_attach = nv04_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 64;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ return 0;
+}
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+static struct nouveau_ofuncs
+nv17_fifo_ofuncs = {
+ .ctor = nv17_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
+static struct nouveau_oclass
+nv17_fifo_sclass[] = {
+ { 0x006e, &nv17_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv17_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x17),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
static int
-nv17_fifo_init(struct drm_device *dev, int engine)
+nv17_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- int i;
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv17_fifo_cclass;
+ nv_engine(priv)->sclass = nv17_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv17_ramfc;
+ return 0;
+}
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+static int
+nv17_fifo_init(struct nouveau_object *object)
+{
+ struct nv04_fifo_priv *priv = (void *)object;
+ int ret;
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->addr >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
- priv->ramfc->addr >> 8);
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
-int
-nv17_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv17_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv17_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv17_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+struct nouveau_oclass
+nv17_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x17),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv17_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv17_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 65a670f92a07..43d5c9eea865 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -1,43 +1,42 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include "nouveau_util.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
#include <core/ramht.h>
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv40_ramfc[] = {
+#include <subdev/instmem.h>
+#include <subdev/instmem/nv04.h>
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+
+#include "nv04.h"
+
+static struct ramfc_desc
+nv40_ramfc[] = {
{ 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
{ 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
{ 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
@@ -63,148 +62,287 @@ static struct ramfc_desc {
{}
};
-struct nv40_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
- struct nouveau_gpuobj *ramro;
- struct nouveau_gpuobj *ramfc;
-};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
-struct nv40_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 ramfc;
-};
+static int
+nv40_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ u32 context, chid = chan->base.chid;
+ int ret;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->addr >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW:
+ context |= 0x00000000;
+ break;
+ case NVDEV_ENGINE_GR:
+ context |= 0x00100000;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ context |= 0x00200000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ context |= chid << 23;
+
+ mutex_lock(&nv_subdev(priv)->mutex);
+ ret = nouveau_ramht_insert(priv->ramht, chid, handle, context);
+ mutex_unlock(&nv_subdev(priv)->mutex);
+ return ret;
+}
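
The RAMHT context word built above packs instance address, owning engine, and channel id into one 32-bit value. A worked example with an invented instance address:

/* illustrative: GR object at instance 0x12340, bound on channel 3 */
u32 context = (0x00012340 >> 4)		/* instance >> 4 = 0x00001234 */
	    | 0x00100000		/* NVDEV_ENGINE_GR              */
	    | (3 << 23);		/* chid 3        = 0x01800000   */
/* context == 0x01901234 */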
static int
-nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv40_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *engctx)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv = nv_engine(dev, engine);
- struct nv40_fifo_chan *fctx;
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
unsigned long flags;
- int ret;
+ u32 reg, ctx;
+
+ switch (nv_engidx(engctx->engine)) {
+ case NVDEV_ENGINE_SW:
+ return 0;
+ case NVDEV_ENGINE_GR:
+ reg = 0x32e0;
+ ctx = 0x38;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ reg = 0x330c;
+ ctx = 0x54;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
+ if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+ nv_wr32(priv, reg, nv_gpuobj(engctx)->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_gpuobj(engctx)->addr >> 4);
+
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
- fctx->ramfc = chan->id * 128;
+static int
+nv40_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *engctx)
+{
+ struct nv04_fifo_priv *priv = (void *)parent->engine;
+ struct nv04_fifo_chan *chan = (void *)parent;
+ unsigned long flags;
+ u32 reg, ctx;
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
+ switch (nv_engidx(engctx->engine)) {
+ case NVDEV_ENGINE_SW:
+ return 0;
+ case NVDEV_ENGINE_GR:
+ reg = 0x32e0;
+ ctx = 0x38;
+ break;
+ case NVDEV_ENGINE_MPEG:
+ reg = 0x330c;
+ ctx = 0x54;
+ break;
+ default:
+ return -EINVAL;
}
- /* initialise default fifo context */
- nv_wo32(priv->ramfc, fctx->ramfc + 0x00, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x04, chan->pushbuf_base);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x0c, chan->pushbuf->addr >> 4);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x18, 0x30000000 |
+ spin_lock_irqsave(&priv->base.lock, flags);
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
+
+ if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
+ nv_wr32(priv, reg, 0x00000000);
+ nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000);
+
+ nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->base.lock, flags);
+ return 0;
+}
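
For NV40, each channel's engine context pointers live at fixed offsets inside its 128-byte RAMFC slice (0x38 for PGRAPH, 0x54 for PMPEG), mirrored into registers 0x32e0/0x330c while the channel is resident. A small worked example of the offsets:

/* illustrative: RAMFC slot holding channel 2's PGRAPH context pointer */
u32 ramfc = 2 * 128;		/* 0x100, as set in nv40_fifo_chan_ctor() */
u32 slot  = ramfc + 0x38;	/* 0x138, written by nv40_fifo_context_attach() */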
+
+static int
+nv40_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_fifo_priv *priv = (void *)engine;
+ struct nv04_fifo_chan *chan;
+ struct nv_channel_dma_class *args = data;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x1000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nv40_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv40_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv40_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+ chan->ramfc = chan->base.chid * 128;
+
+ nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->offset);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- nv_wo32(priv->ramfc, fctx->ramfc + 0x3c, 0x0001ffff);
+ nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+ return 0;
+}
+
+static struct nouveau_ofuncs
+nv40_fifo_ofuncs = {
+ .ctor = nv40_fifo_chan_ctor,
+ .dtor = nv04_fifo_chan_dtor,
+ .init = nv04_fifo_chan_init,
+ .fini = nv04_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv40_fifo_sclass[] = {
+ { 0x006e, &nv40_fifo_ofuncs },
+ {}
+};
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
- /*XXX: remove this later, need fifo engine context commit hook */
- nouveau_gpuobj_ref(priv->ramfc, &chan->ramfc);
+static struct nouveau_oclass
+nv40_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_fifo_context_ctor,
+ .dtor = _nouveau_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
-error:
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv40_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_instmem_priv *imem = nv04_instmem(parent);
+ struct nv04_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
+ *pobject = nv_object(priv);
if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ return ret;
+
+ nouveau_ramht_ref(imem->ramht, &priv->ramht);
+ nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
+ nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv40_fifo_cclass;
+ nv_engine(priv)->sclass = nv40_fifo_sclass;
+ priv->base.pause = nv04_fifo_pause;
+ priv->base.start = nv04_fifo_start;
+ priv->ramfc_desc = nv40_ramfc;
+ return 0;
}
static int
-nv40_fifo_init(struct drm_device *dev, int engine)
+nv40_fifo_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv = nv_engine(dev, engine);
- int i;
+ struct nv04_fifo_priv *priv = (void *)object;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ int ret;
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002044, 0x2101ffff);
- nv_wr32(dev, 0x002058, 0x00000001);
+ nv_wr32(priv, 0x002040, 0x000000ff);
+ nv_wr32(priv, 0x002044, 0x2101ffff);
+ nv_wr32(priv, 0x002058, 0x00000001);
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->addr >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
+ nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((priv->ramht->bits - 9) << 16) |
+ (priv->ramht->base.addr >> 8));
+ nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8);
- switch (dev_priv->chipset) {
+ switch (nv_device(priv)->chipset) {
case 0x47:
case 0x49:
case 0x4b:
- nv_wr32(dev, 0x002230, 0x00000001);
+ nv_wr32(priv, 0x002230, 0x00000001);
case 0x40:
case 0x41:
case 0x42:
case 0x43:
case 0x45:
case 0x48:
- nv_wr32(dev, 0x002220, 0x00030002);
+ nv_wr32(priv, 0x002220, 0x00030002);
break;
default:
- nv_wr32(dev, 0x002230, 0x00000000);
- nv_wr32(dev, 0x002220, ((nvfb_vram_size(dev) - 512 * 1024 +
+ nv_wr32(priv, 0x002230, 0x00000000);
+ nv_wr32(priv, 0x002220, ((pfb->ram.size - 512 * 1024 +
priv->ramfc->addr) >> 16) |
- 0x00030000);
+ 0x00030000);
break;
}
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max);
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
+ nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+ nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(priv, NV03_PFIFO_CACHES, 1);
return 0;
}
-int
-nv40_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(nvimem_ramro(dev), &priv->ramro);
- nouveau_gpuobj_ref(nvimem_ramfc(dev), &priv->ramfc);
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv40_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv40_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv40_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+struct nouveau_oclass
+nv40_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_fifo_ctor,
+ .dtor = nv04_fifo_dtor,
+ .init = nv40_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
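
The nv40 PFIFO above is now exported as a nouveau_oclass whose ofuncs table carries ctor/dtor/init/fini, in place of the old create/destroy callbacks hung off drm_device. A minimal standalone sketch of that vtable-driven object model follows; the struct and function names are simplified stand-ins for illustration, not the real nouveau core types.

/* oclass/ofuncs sketch: one class descriptor bundles a handle with a
 * table of life-cycle entry points, and the core drives every engine
 * object through the same four hooks.  Stand-in types only. */
#include <stdio.h>
#include <stdlib.h>

struct object;
struct oclass;

struct ofuncs {
	int  (*ctor)(const struct oclass *oclass, struct object **pobject);
	void (*dtor)(struct object *object);
	int  (*init)(struct object *object);
	int  (*fini)(struct object *object, int suspend);
};

struct oclass {
	unsigned handle;             /* e.g. an ENGINE(FIFO, 0x40)-style id */
	const struct ofuncs *ofuncs; /* per-class behaviour */
};

struct object {
	const struct oclass *oclass;
};

static int
demo_ctor(const struct oclass *oclass, struct object **pobject)
{
	struct object *obj = calloc(1, sizeof(*obj));
	if (!obj)
		return -1;
	obj->oclass = oclass;
	*pobject = obj;
	return 0;
}

static void demo_dtor(struct object *object) { free(object); }

static int demo_init(struct object *object)
{
	printf("init class 0x%02x\n", object->oclass->handle);
	return 0;
}

static int demo_fini(struct object *object, int suspend)
{
	(void)suspend;
	printf("fini class 0x%02x\n", object->oclass->handle);
	return 0;
}

static const struct ofuncs demo_ofuncs = {
	.ctor = demo_ctor,
	.dtor = demo_dtor,
	.init = demo_init,
	.fini = demo_fini,
};

static const struct oclass demo_fifo_oclass = {
	.handle = 0x40,
	.ofuncs = &demo_ofuncs,
};

int main(void)
{
	struct object *obj;

	/* the core looks the class up by handle and then drives every
	 * engine object through the same table of entry points */
	if (demo_fifo_oclass.ofuncs->ctor(&demo_fifo_oclass, &obj))
		return 1;
	obj->oclass->ofuncs->init(obj);
	obj->oclass->ofuncs->fini(obj, 0);
	obj->oclass->ofuncs->dtor(obj);
	return 0;
}
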
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 7b5b1592bf61..4914c3b94413 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -1,126 +1,123 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
+#include <core/client.h>
+#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
-struct nv50_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
+#include <subdev/timer.h>
+#include <subdev/bar.h>
-struct nv50_fifo_chan {
- struct nouveau_fifo_chan base;
-};
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
void
-nv50_fifo_playlist_update(struct drm_device *dev)
+nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
{
- struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
cur = priv->playlist[priv->cur_playlist];
priv->cur_playlist = !priv->cur_playlist;
- for (i = 0, p = 0; i < priv->base.channels; i++) {
- if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+ for (i = priv->base.min, p = 0; i < priv->base.max; i++) {
+ if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000)
nv_wo32(cur, p++ * 4, i);
}
- nvimem_flush(dev);
+ bar->flush(bar);
- nv_wr32(dev, 0x0032f4, cur->addr >> 12);
- nv_wr32(dev, 0x0032ec, p);
- nv_wr32(dev, 0x002500, 0x00000101);
+ nv_wr32(priv, 0x0032f4, cur->addr >> 12);
+ nv_wr32(priv, 0x0032ec, p);
+ nv_wr32(priv, 0x002500, 0x00000101);
}
static int
-nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv50_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
{
- struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv50_fifo_chan *fctx;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- u64 instance = chan->ramin->addr >> 12;
- unsigned long flags;
- int ret = 0, i;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
- nvvm_engref(chan->vm, engine, 1);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nouveau_gpuobj *ectx = (void *)object;
+ u64 limit = ectx->addr + ectx->size - 1;
+ u64 start = ectx->addr;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0000; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ default:
+ return -EINVAL;
}
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x3c, 0x403f6078);
- nv_wo32(chan->ramin, 0x40, 0x00000000);
- nv_wo32(chan->ramin, 0x44, 0x01003fff);
- nv_wo32(chan->ramin, 0x48, chan->pushbuf->node->offset >> 4);
- nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
- nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
- nv_wo32(chan->ramin, 0x60, 0x7fffffff);
- nv_wo32(chan->ramin, 0x78, 0x00000000);
- nv_wo32(chan->ramin, 0x7c, 0x30000001);
- nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
-
- nvimem_flush(dev);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ nv_wo32(base->eng, addr + 0x00, 0x00190000);
+ nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+ nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+ nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ return 0;
}
-static bool
-nv50_fifo_kickoff(struct nouveau_channel *chan)
+static int
+nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
{
- struct drm_device *dev = chan->dev;
- bool done = true;
- u32 me;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_priv *priv = (void *)parent->engine;
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 addr, me;
+ int ret = 0;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0000; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
/* HW bug workaround:
*
@@ -134,159 +131,308 @@ nv50_fifo_kickoff(struct nouveau_channel *chan)
* there's also a "ignore these engines" bitmask reg we can use
* if we hit the issue there..
*/
-
- /* PME: make sure engine is enabled */
- me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+ me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001);
/* do the kickoff... */
- nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
- done = false;
+ nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+ if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ if (suspend)
+ ret = -EBUSY;
}
- /* restore any engine states we changed, and exit */
- nv_wr32(dev, 0x00b860, me);
- return done;
+ nv_wr32(priv, 0x00b860, me);
+ return ret;
}
-static void
-nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nv50_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
{
- struct nv50_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- /* remove channel from playlist, will context switch if active */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
- nv50_fifo_playlist_update(dev);
-
- /* tell any engines on this channel to unload their contexts */
- nv50_fifo_kickoff(chan);
-
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 context;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->node->offset >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW : context |= 0x00000000; break;
+ case NVDEV_ENGINE_GR : context |= 0x00100000; break;
+ case NVDEV_ENGINE_MPEG : context |= 0x00200000; break;
+ default:
+ return -EINVAL;
}
- nvvm_engref(chan->vm, engine, -1);
- chan->engctx[engine] = NULL;
- kfree(fctx);
+ return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
+
+void
+nv50_fifo_object_detach(struct nouveau_object *parent, int cookie)
+{
+ struct nv50_fifo_chan *chan = (void *)parent;
+ nouveau_ramht_remove(chan->ramht, cookie);
}
static int
-nv50_fifo_init(struct drm_device *dev, int engine)
+nv50_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 instance;
- int i;
-
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x00250c, 0x6f3cfc34);
- nv_wr32(dev, 0x002044, 0x01003fff);
-
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-
- for (i = 0; i < 128; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && chan->engctx[engine])
- instance = 0x80000000 | chan->ramin->addr >> 12;
- else
- instance = 0x00000000;
- nv_wr32(dev, 0x002600 + (i * 4), instance);
- }
+ struct nv_channel_ind_class *args = data;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ u64 ioffset, ilength;
+ int ret;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
- nv50_fifo_playlist_update(dev);
+ nv_parent(chan)->context_attach = nv50_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv50_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv50_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
- nv_wr32(dev, 0x003200, 1);
- nv_wr32(dev, 0x003250, 1);
- nv_wr32(dev, 0x002500, 1);
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
+ if (ret)
+ return ret;
+
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+ nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ bar->flush(bar);
return 0;
}
+void
+nv50_fifo_chan_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_chan *chan = (void *)object;
+ nouveau_ramht_ref(NULL, &chan->ramht);
+ nouveau_fifo_channel_destroy(&chan->base);
+}
+
static int
-nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+nv50_fifo_chan_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- /* set playlist length to zero, fifo will unload context */
- nv_wr32(dev, 0x0032ec, 0);
-
- /* tell all connected engines to unload their contexts */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && !nv50_fifo_kickoff(chan))
- return -EBUSY;
- }
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_base *base = (void *)object->parent;
+ struct nv50_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *ramfc = base->ramfc;
+ u32 chid = chan->base.chid;
+ int ret;
- nv_wr32(dev, 0x002140, 0);
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12);
+ nv50_fifo_playlist_update(priv);
return 0;
}
-void
-nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
+int
+nv50_fifo_chan_fini(struct nouveau_object *object, bool suspend)
{
- nv50_vm_flush_engine(dev, 5);
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+
+ /* remove channel from playlist, fifo will unload context */
+ nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(priv);
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000);
+
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
}
-void
-nv50_fifo_destroy(struct drm_device *dev, int engine)
+static struct nouveau_ofuncs
+nv50_fifo_ofuncs = {
+ .ctor = nv50_fifo_chan_ctor,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv50_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
+
+static struct nouveau_oclass
+nv50_fifo_sclass[] = {
+ { 0x506f, &nv50_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
+
+static int
+nv50_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv50_fifo_base *base;
+ int ret;
- nouveau_irq_unregister(dev, 8);
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0, 0,
+ &base->pgd);
+ if (ret)
+ return ret;
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
- dev_priv->eng[engine] = NULL;
- kfree(priv);
+ return 0;
}
-int
-nv50_fifo_create(struct drm_device *dev)
+void
+nv50_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->eng);
+ nouveau_gpuobj_ref(NULL, &base->ramfc);
+ nouveau_gpuobj_ref(NULL, &base->cache);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nv50_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fifo_context_ctor,
+ .dtor = nv50_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
+static int
+nv50_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fifo_priv *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv50_fifo_destroy;
- priv->base.base.init = nv50_fifo_init;
- priv->base.base.fini = nv50_fifo_fini;
- priv->base.base.context_new = nv50_fifo_context_new;
- priv->base.base.context_del = nv50_fifo_context_del;
- priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
- priv->base.channels = 127;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ *pobject = nv_object(priv);
if (ret)
- goto error;
+ return ret;
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
if (ret)
- goto error;
+ return ret;
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv50_fifo_cclass;
+ nv_engine(priv)->sclass = nv50_fifo_sclass;
+ return 0;
+}
+
+void
+nv50_fifo_dtor(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object;
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ nouveau_fifo_destroy(&priv->base);
}
+
+int
+nv50_fifo_init(struct nouveau_object *object)
+{
+ struct nv50_fifo_priv *priv = (void *)object;
+ int ret, i;
+
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(priv, 0x00250c, 0x6f3cfc34);
+ nv_wr32(priv, 0x002044, 0x01003fff);
+
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++)
+ nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
+ nv50_fifo_playlist_update(priv);
+
+ nv_wr32(priv, 0x003200, 0x00000001);
+ nv_wr32(priv, 0x003250, 0x00000001);
+ nv_wr32(priv, 0x002500, 0x00000001);
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_fifo_ctor,
+ .dtor = nv50_fifo_dtor,
+ .init = nv50_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
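
In the nv50 channel constructor above, the indirect pushbuffer is described to RAMFC by two words: the low 32 bits of the IB offset at 0x50, and at 0x54 the upper bits of the offset with log2i(args->ilength / 8) packed in from bit 16. A standalone sketch of that packing follows; the example offset and size are made-up values, and ilog2_u64 is a local stand-in for the core/math.h helper.

/* Sketch of the RAMFC 0x50/0x54 packing used above; values and the
 * ilog2_u64 helper are local stand-ins, not nouveau code. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ilog2_u64(uint64_t v)	/* assumes v is a power of two */
{
	uint32_t l = 0;
	while (v >>= 1)
		l++;
	return l;
}

int main(void)
{
	uint64_t ioffset = 0x20100000ULL;   /* example IB virtual address  */
	uint64_t ilength = 0x10000;         /* example IB size in bytes    */
	uint32_t order   = ilog2_u64(ilength / 8);  /* 8-byte IB entries   */

	uint32_t ramfc_50 = (uint32_t)(ioffset & 0xffffffff);
	uint32_t ramfc_54 = (uint32_t)(ioffset >> 32) | (order << 16);

	printf("0x50 = 0x%08x\n0x54 = 0x%08x\n", ramfc_50, ramfc_54);
	return 0;
}
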
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
new file mode 100644
index 000000000000..3a9ceb315c20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.h
@@ -0,0 +1,36 @@
+#ifndef __NV50_FIFO_H__
+#define __NV50_FIFO_H__
+
+struct nv50_fifo_priv {
+ struct nouveau_fifo base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+ struct nouveau_gpuobj *eng;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 subc[8];
+ struct nouveau_ramht *ramht;
+};
+
+void nv50_fifo_playlist_update(struct nv50_fifo_priv *);
+
+void nv50_fifo_object_detach(struct nouveau_object *, int);
+void nv50_fifo_chan_dtor(struct nouveau_object *);
+int nv50_fifo_chan_fini(struct nouveau_object *, bool);
+
+void nv50_fifo_context_dtor(struct nouveau_object *);
+
+void nv50_fifo_dtor(struct nouveau_object *);
+int nv50_fifo_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 63a4941e285c..765affb12666 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -1,249 +1,343 @@
/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
+#include <core/os.h>
+#include <core/client.h>
+#include <core/engctx.h>
#include <core/ramht.h>
+#include <core/class.h>
+#include <core/math.h>
-struct nv84_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
+#include <subdev/timer.h>
+#include <subdev/bar.h>
-struct nv84_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
-};
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
static int
-nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
+nv84_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
{
- struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv84_fifo_chan *fctx;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- u64 instance;
- unsigned long flags;
- int ret;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nouveau_gpuobj *ectx = (void *)object;
+ u64 limit = ectx->addr + ectx->size - 1;
+ u64 start = ectx->addr;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0020; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+ default:
+ return -EINVAL;
+ }
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
- nvvm_engref(chan->vm, engine, 1);
+ nv_wo32(base->eng, addr + 0x00, 0x00190000);
+ nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
+ nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
+ nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
+ upper_32_bits(start));
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+ return 0;
+}
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
+static int
+nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_priv *priv = (void *)parent->engine;
+ struct nv50_fifo_base *base = (void *)parent->parent;
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0020; break;
+ case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
+ case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x00c0; break;
+ default:
+ return -EINVAL;
}
- ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- instance = fctx->ramfc->addr >> 8;
+ nv_wo32(base->eng, addr + 0x00, 0x00000000);
+ nv_wo32(base->eng, addr + 0x04, 0x00000000);
+ nv_wo32(base->eng, addr + 0x08, 0x00000000);
+ nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+ nv_wo32(base->eng, addr + 0x10, 0x00000000);
+ nv_wo32(base->eng, addr + 0x14, 0x00000000);
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
+ if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ nv_error(priv, "channel %d unload timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
+ }
+ return 0;
+}
- ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
- nv_wo32(fctx->ramfc, 0x40, 0x00000000);
- nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
- nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->node->offset >> 4);
- nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
- nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
- nv_wo32(fctx->ramfc, 0x78, 0x00000000);
- nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
- nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->node->offset >> 4));
- nv_wo32(fctx->ramfc, 0x88, fctx->cache->addr >> 10);
- nv_wo32(fctx->ramfc, 0x98, chan->ramin->addr >> 12);
+static int
+nv84_fifo_object_attach(struct nouveau_object *parent,
+ struct nouveau_object *object, u32 handle)
+{
+ struct nv50_fifo_chan *chan = (void *)parent;
+ u32 context;
+
+ if (nv_iclass(object, NV_GPUOBJ_CLASS))
+ context = nv_gpuobj(object)->node->offset >> 4;
+ else
+ context = 0x00000004; /* just non-zero */
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_DMAOBJ:
+ case NVDEV_ENGINE_SW : context |= 0x00000000; break;
+ case NVDEV_ENGINE_GR : context |= 0x00100000; break;
+ case NVDEV_ENGINE_MPEG :
+ case NVDEV_ENGINE_PPP : context |= 0x00200000; break;
+ case NVDEV_ENGINE_ME :
+ case NVDEV_ENGINE_COPY0 : context |= 0x00300000; break;
+ case NVDEV_ENGINE_VP : context |= 0x00400000; break;
+ case NVDEV_ENGINE_CRYPT :
+ case NVDEV_ENGINE_UNK1C1: context |= 0x00500000; break;
+ case NVDEV_ENGINE_BSP : context |= 0x00600000; break;
+ default:
+ return -EINVAL;
+ }
- nv_wo32(chan->ramin, 0x00, chan->id);
- nv_wo32(chan->ramin, 0x04, fctx->ramfc->addr >> 8);
+ return nouveau_ramht_insert(chan->ramht, 0, handle, context);
+}
- nvimem_flush(dev);
+static int
+nv84_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_fifo_base *base = (void *)parent;
+ struct nv50_fifo_chan *chan;
+ struct nv_channel_ind_class *args = data;
+ u64 ioffset, ilength;
+ int ret;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
+ 0x2000, args->pushbuf,
+ (1 << NVDEV_ENGINE_DMAOBJ) |
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_MPEG) |
+ (1 << NVDEV_ENGINE_ME) |
+ (1 << NVDEV_ENGINE_VP) |
+ (1 << NVDEV_ENGINE_CRYPT) |
+ (1 << NVDEV_ENGINE_BSP) |
+ (1 << NVDEV_ENGINE_PPP) |
+ (1 << NVDEV_ENGINE_COPY0) |
+ (1 << NVDEV_ENGINE_UNK1C1), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
-error:
+ ret = nouveau_ramht_new(parent, parent, 0x8000, 16, &chan->ramht);
if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ return ret;
+
+ nv_parent(chan)->context_attach = nv84_fifo_context_attach;
+ nv_parent(chan)->context_detach = nv84_fifo_context_detach;
+ nv_parent(chan)->object_attach = nv84_fifo_object_attach;
+ nv_parent(chan)->object_detach = nv50_fifo_object_detach;
+
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ nv_wo32(base->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(base->ramfc, 0x44, 0x01003fff);
+ nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
+ nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
+ nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(base->ramfc, 0x78, 0x00000000);
+ nv_wo32(base->ramfc, 0x7c, 0x30000001);
+ nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->base.node->offset >> 4));
+ nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
+ nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
+ bar->flush(bar);
+ return 0;
}
-static void
-nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nv84_fifo_chan_init(struct nouveau_object *object)
{
- struct nv84_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
- u32 save;
+ struct nv50_fifo_priv *priv = (void *)object->engine;
+ struct nv50_fifo_base *base = (void *)object->parent;
+ struct nv50_fifo_chan *chan = (void *)object;
+ struct nouveau_gpuobj *ramfc = base->ramfc;
+ u32 chid = chan->base.chid;
+ int ret;
+
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
- /* remove channel from playlist, will context switch if active */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
- nv50_fifo_playlist_update(dev);
+ nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
+ nv50_fifo_playlist_update(priv);
+ return 0;
+}
- save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+static struct nouveau_ofuncs
+nv84_fifo_ofuncs = {
+ .ctor = nv84_fifo_chan_ctor,
+ .dtor = nv50_fifo_chan_dtor,
+ .init = nv84_fifo_chan_init,
+ .fini = nv50_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
- /* tell any engines on this channel to unload their contexts */
- nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+static struct nouveau_oclass
+nv84_fifo_sclass[] = {
+ { 0x826f, &nv84_fifo_ofuncs },
+ {}
+};
- nv_wr32(dev, 0x002520, save);
+/*******************************************************************************
+ * FIFO context - basically just the instmem reserved for the channel
+ ******************************************************************************/
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+int
+nv84_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_fifo_base *base;
+ int ret;
- /* clean up */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
+ 0x1000, NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
- nouveau_gpuobj_ref(NULL, &fctx->ramfc);
- nouveau_gpuobj_ref(NULL, &fctx->cache);
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0200, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->eng);
+ if (ret)
+ return ret;
- nvvm_engref(chan->vm, engine, -1);
- chan->engctx[engine] = NULL;
- kfree(fctx);
-}
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x4000, 0,
+ 0, &base->pgd);
+ if (ret)
+ return ret;
-static int
-nv84_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_chan *fctx;
- u32 instance;
- int i;
-
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x00250c, 0x6f3cfc34);
- nv_wr32(dev, 0x002044, 0x01003fff);
-
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-
- for (i = 0; i < 128; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && (fctx = chan->engctx[engine]))
- instance = 0x80000000 | fctx->ramfc->addr >> 8;
- else
- instance = 0x00000000;
- nv_wr32(dev, 0x002600 + (i * 4), instance);
- }
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
- nv50_fifo_playlist_update(dev);
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x1000, 0x400,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->cache);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, nv_object(base), 0x0100, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &base->ramfc);
+ if (ret)
+ return ret;
- nv_wr32(dev, 0x003200, 1);
- nv_wr32(dev, 0x003250, 1);
- nv_wr32(dev, 0x002500, 1);
return 0;
}
-static int
-nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_priv *priv = nv_engine(dev, engine);
- int i;
- u32 save;
-
- /* set playlist length to zero, fifo will unload context */
- nv_wr32(dev, 0x0032ec, 0);
-
- save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
- /* tell all connected engines to unload their contexts */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan)
- nv_wr32(dev, 0x0032fc, chan->ramin->addr >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
- return -EBUSY;
- }
- }
+static struct nouveau_oclass
+nv84_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_fifo_context_ctor,
+ .dtor = nv50_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
- nv_wr32(dev, 0x002520, save);
- nv_wr32(dev, 0x002140, 0);
- return 0;
-}
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
-int
-nv84_fifo_create(struct drm_device *dev)
+static int
+nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_priv *priv;
+ struct nv50_fifo_priv *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv50_fifo_destroy;
- priv->base.base.init = nv84_fifo_init;
- priv->base.base.fini = nv84_fifo_fini;
- priv->base.base.context_new = nv84_fifo_context_new;
- priv->base.base.context_del = nv84_fifo_context_del;
- priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
- priv->base.channels = 127;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ ret = nouveau_fifo_create(parent, engine, oclass, 1, 127, &priv);
+ *pobject = nv_object(priv);
if (ret)
- goto error;
+ return ret;
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[0]);
if (ret)
- goto error;
+ return ret;
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 4, 0x1000, 0,
+ &priv->playlist[1]);
if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nv04_fifo_intr;
+ nv_engine(priv)->cclass = &nv84_fifo_cclass;
+ nv_engine(priv)->sclass = nv84_fifo_sclass;
+ return 0;
}
+
+struct nouveau_oclass
+nv84_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_fifo_ctor,
+ .dtor = nv50_fifo_dtor,
+ .init = nv50_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
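
nv84_fifo_context_attach above records each engine's context in the channel's eng object as a six-word entry at an engine-specific offset (GR at 0x0020, MPEG at 0x0060, CRYPT at 0x00a0, COPY0 at 0x00c0): a 0x00190000 flags word, the low halves of the context limit and start, a word packing their upper bits, and two zero words. A standalone sketch of filling such a record into a plain array; the addresses are arbitrary examples and eng[] stands in for the base->eng gpuobj.

/* Sketch of the per-engine context record written by
 * nv50/nv84_fifo_context_attach above; eng[] is a plain array indexed
 * in 32-bit words, standing in for nv_wo32() on base->eng. */
#include <stdint.h>
#include <stdio.h>

static void
ctx_attach(uint32_t *eng, uint32_t addr, uint64_t start, uint64_t size)
{
	uint64_t limit = start + size - 1;
	uint32_t i = addr / 4;

	eng[i + 0] = 0x00190000;
	eng[i + 1] = (uint32_t)limit;
	eng[i + 2] = (uint32_t)start;
	eng[i + 3] = (uint32_t)(limit >> 32) << 24 | (uint32_t)(start >> 32);
	eng[i + 4] = 0x00000000;
	eng[i + 5] = 0x00000000;
}

int main(void)
{
	uint32_t eng[0x100 / 4] = { 0 };

	/* 0x0020 is the GR slot on nv84 per the switch above; the
	 * context start/size below are made-up example values */
	ctx_attach(eng, 0x0020, 0x1000000000ULL, 0x10000);
	printf("flags=0x%08x hi=0x%08x\n", eng[0x20 / 4], eng[0x20 / 4 + 3]);
	return 0;
}
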
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b99d976011d1..ef403fe66ce0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,17 +22,24 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-#include "nouveau_software.h"
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
-static void nvc0_fifo_isr(struct drm_device *);
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
struct nvc0_fifo_priv {
- struct nouveau_fifo_priv base;
+ struct nouveau_fifo base;
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
struct {
@@ -42,14 +49,24 @@ struct nvc0_fifo_priv {
int spoon_nr;
};
+struct nvc0_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
struct nvc0_fifo_chan {
struct nouveau_fifo_chan base;
};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
static void
-nvc0_fifo_playlist_update(struct drm_device *dev)
+nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
{
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nouveau_bar *bar = nouveau_bar(priv);
struct nouveau_gpuobj *cur;
int i, p;
@@ -57,174 +74,253 @@ nvc0_fifo_playlist_update(struct drm_device *dev)
priv->cur_playlist = !priv->cur_playlist;
for (i = 0, p = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+ if (!(nv_rd32(priv, 0x003004 + (i * 8)) & 1))
continue;
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000004);
p += 8;
}
- nvimem_flush(dev);
+ bar->flush(bar);
- nv_wr32(dev, 0x002270, cur->addr >> 12);
- nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
- if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
- NV_ERROR(dev, "PFIFO - playlist update failed\n");
+ nv_wr32(priv, 0x002270, cur->addr >> 12);
+ nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
+ nv_error(priv, "playlist update failed\n");
}
static int
-nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
+nvc0_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
{
- struct drm_device *dev = chan->dev;
- struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
- struct nvc0_fifo_chan *fctx;
- u64 usermem = priv->user.mem->addr + chan->id * 0x1000;
- u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret, i;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_base *base = (void *)parent->parent;
+ struct nouveau_engctx *ectx = (void *)object;
+ u32 addr;
+ int ret;
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+ case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ default:
+ return -EINVAL;
+ }
- chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
- priv->user.bar.offset + (chan->id * 0x1000),
- PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
+ if (!ectx->vma.node) {
+ ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
+ if (ret)
+ return ret;
}
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
- nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
- nv_wo32(chan->ramin, 0x10, 0x0000face);
- nv_wo32(chan->ramin, 0x30, 0xfffff902);
- nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
- upper_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x54, 0x00000002);
- nv_wo32(chan->ramin, 0x84, 0x20400000);
- nv_wo32(chan->ramin, 0x94, 0x30000001);
- nv_wo32(chan->ramin, 0x9c, 0x00000100);
- nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
- nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
- nv_wo32(chan->ramin, 0xac, 0x0000001f);
- nv_wo32(chan->ramin, 0xb8, 0xf8000000);
- nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
- nvimem_flush(dev);
-
- nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
- (chan->ramin->addr >> 12));
- nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
- nvc0_fifo_playlist_update(dev);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+ nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+ bar->flush(bar);
+ return 0;
}
-static void
-nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
{
- struct nvc0_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
-
- nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, chan->id);
- if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
- NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
- nvc0_fifo_playlist_update(dev);
- nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
-
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_priv *priv = (void *)parent->engine;
+ struct nvc0_fifo_base *base = (void *)parent->parent;
+ struct nvc0_fifo_chan *chan = (void *)parent;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
+ case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
+ default:
+ return -EINVAL;
}
- chan->engctx[engine] = NULL;
- kfree(fctx);
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
+ }
+
+ return 0;
}
static int
-nvc0_fifo_init(struct drm_device *dev, int engine)
+nvc0_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
- struct nouveau_channel *chan;
- int i;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nvc0_fifo_priv *priv = (void *)engine;
+ struct nvc0_fifo_base *base = (void *)parent;
+ struct nvc0_fifo_chan *chan;
+ struct nv_channel_ind_class *args = data;
+ u64 usermem, ioffset, ilength;
+ int ret, i;
- /* reset PFIFO, enable all available PSUBFIFO areas */
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x000204, 0xffffffff);
- nv_wr32(dev, 0x002204, 0xffffffff);
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x1000,
+ args->pushbuf,
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR) |
+ (1 << NVDEV_ENGINE_COPY0) |
+ (1 << NVDEV_ENGINE_COPY1), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nvc0_fifo_context_attach;
+ nv_parent(chan)->context_detach = nvc0_fifo_context_detach;
+
+ usermem = chan->base.chid * 0x1000;
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ for (i = 0; i < 0x1000; i += 4)
+ nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+ nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x10, 0x0000face);
+ nv_wo32(base, 0x30, 0xfffff902);
+ nv_wo32(base, 0x48, lower_32_bits(ioffset));
+ nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base, 0x54, 0x00000002);
+ nv_wo32(base, 0x84, 0x20400000);
+ nv_wo32(base, 0x94, 0x30000001);
+ nv_wo32(base, 0x9c, 0x00000100);
+ nv_wo32(base, 0xa4, 0x1f1f1f1f);
+ nv_wo32(base, 0xa8, 0x1f1f1f1f);
+ nv_wo32(base, 0xac, 0x0000001f);
+ nv_wo32(base, 0xb8, 0xf8000000);
+ nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+ bar->flush(bar);
+ return 0;
+}
- priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
- NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+static int
+nvc0_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+ struct nvc0_fifo_priv *priv = (void *)object->engine;
+ struct nvc0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+ int ret;
- /* assign engines to subfifos */
- if (priv->spoon_nr >= 3) {
- nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
- nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
- nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
- nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
- nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
- nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
- }
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
- }
+ nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12);
+ nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(priv);
+ return 0;
+}
- nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
- nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+static int
+nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nvc0_fifo_priv *priv = (void *)object->engine;
+ struct nvc0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
- nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xbfffffff);
+ nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
+ nvc0_fifo_playlist_update(priv);
+ nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
- /* restore PFIFO context table */
- for (i = 0; i < 128; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->engctx[engine])
- continue;
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
- nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
- (chan->ramin->addr >> 12));
- nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
- }
- nvc0_fifo_playlist_update(dev);
+static struct nouveau_ofuncs
+nvc0_fifo_ofuncs = {
+ .ctor = nvc0_fifo_chan_ctor,
+ .dtor = _nouveau_fifo_channel_dtor,
+ .init = nvc0_fifo_chan_init,
+ .fini = nvc0_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
- return 0;
-}
+static struct nouveau_oclass
+nvc0_fifo_sclass[] = {
+ { 0x906f, &nvc0_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
static int
-nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+nvc0_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- int i;
+ struct nvc0_fifo_base *base;
+ int ret;
- for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
- continue;
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_HEAP, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
+ ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ if (ret)
+ return ret;
+
+ nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0208, 0xffffffff);
+ nv_wo32(base, 0x020c, 0x000000ff);
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
- nv_wr32(dev, 0x002140, 0x00000000);
return 0;
}
+static void
+nvc0_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nvc0_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nvc0_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fifo_context_ctor,
+ .dtor = nvc0_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
struct nouveau_enum nvc0_fifo_fault_unit[] = {
{ 0x00, "PGRAPH" },
@@ -289,16 +385,16 @@ struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
};
static void
-nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
{
- u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
- u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
- u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
- u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
- NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
- (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
+ "write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
printk("] from ");
nouveau_enum_print(nvc0_fifo_fault_unit, unit);
@@ -313,165 +409,223 @@ nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
}
static int
-nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
+nvc0_fifo_swmthd(struct nvc0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
+ struct nvc0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
unsigned long flags;
int ret = -EINVAL;
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < priv->base.channels)) {
- chan = dev_priv->channels.ptr[chid];
- if (likely(chan)) {
- struct nouveau_software_chan *swch =
- chan->engctx[NVOBJ_ENGINE_SW];
- ret = swch->flip(swch->flip_data);
- }
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
}
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
return ret;
}
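
nvc0_fifo_swmthd() above replaces the old page-flip hook: it looks the channel up by chid under the fifo lock, finds the software object (class 0x906e) bound to it, and forwards the method. A simplified, standalone model of that lock/lookup/dispatch shape, using pthreads and made-up types rather than the nouveau object model:

/* sketch: lock-protected channel lookup + method dispatch; every name
 * here is a stand-in, and the "!mthd means bind-check only" detail of
 * the real function is omitted */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define CHAN_MAX 8

struct chan {
        int chid;
        int (*mthd)(struct chan *, unsigned mthd, unsigned data);
};

static pthread_mutex_t fifo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chan *channels[CHAN_MAX];

static int swmthd(unsigned chid, unsigned mthd, unsigned data)
{
        struct chan *chan = NULL;
        int ret = -1;   /* -EINVAL in the kernel version */

        pthread_mutex_lock(&fifo_lock);
        if (chid < CHAN_MAX)
                chan = channels[chid];
        if (chan && chan->mthd && !chan->mthd(chan, mthd, data))
                ret = 0;
        pthread_mutex_unlock(&fifo_lock);
        return ret;
}

static int demo_mthd(struct chan *c, unsigned mthd, unsigned data)
{
        printf("ch %d mthd 0x%04x data 0x%08x\n", c->chid, mthd, data);
        return 0;
}

int main(void)
{
        struct chan c = { .chid = 3, .mthd = demo_mthd };

        channels[3] = &c;
        return swmthd(3, 0x0500, 0x00000000) ? 1 : 0;
}
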
static void
-nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
{
- u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
- u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
- u32 subc = (addr & 0x00070000);
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
if (stat & 0x00200000) {
if (mthd == 0x0054) {
- if (!nvc0_fifo_page_flip(dev, chid))
+ if (!nvc0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
show &= ~0x00200000;
}
}
+ if (stat & 0x00800000) {
+ if (!nvc0_fifo_swmthd(priv, chid, mthd, data))
+ show &= ~0x00800000;
+ }
+
if (show) {
- NV_INFO(dev, "PFIFO%d:", unit);
+ nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
- NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ printk("\n");
+ nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+ "data 0x%08x\n",
+ unit, chid, subc, mthd, data);
}
- nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
- nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
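
One behavioural fix hidden in the hunk above: subc is now shifted down (">> 16") before printing, so the error line reports the subchannel number rather than raw address bits. The decode itself is just masking and shifting; a tiny standalone check with a made-up register value:

/* sketch: decoding the SUBFIFO method-address word as done above */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0x00052054;                /* hypothetical 0x0400c0 readout */
        uint32_t subc = (addr & 0x00070000) >> 16; /* subchannel, 0..7 */
        uint32_t mthd =  addr & 0x00003ffc;        /* method offset, word aligned */

        printf("subc %u mthd 0x%04x\n", subc, mthd); /* prints: subc 5 mthd 0x2054 */
        return 0;
}
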
static void
-nvc0_fifo_isr(struct drm_device *dev)
+nvc0_fifo_intr(struct nouveau_subdev *subdev)
{
- u32 mask = nv_rd32(dev, 0x002140);
- u32 stat = nv_rd32(dev, 0x002100) & mask;
+ struct nvc0_fifo_priv *priv = (void *)subdev;
+ u32 mask = nv_rd32(priv, 0x002140);
+ u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
- nv_wr32(dev, 0x002100, 0x00000100);
+ nv_info(priv, "unknown status 0x00000100\n");
+ nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x10000000) {
- u32 units = nv_rd32(dev, 0x00259c);
+ u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nvc0_fifo_isr_vm_fault(dev, i);
+ nvc0_fifo_isr_vm_fault(priv, i);
u &= ~(1 << i);
}
- nv_wr32(dev, 0x00259c, units);
+ nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
- u32 units = nv_rd32(dev, 0x0025a0);
+ u32 units = nv_rd32(priv, 0x0025a0);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nvc0_fifo_isr_subfifo_intr(dev, i);
+ nvc0_fifo_isr_subfifo_intr(priv, i);
u &= ~(1 << i);
}
- nv_wr32(dev, 0x0025a0, units);
+ nv_wr32(priv, 0x0025a0, units);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
- nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ nv_warn(priv, "unknown status 0x40000000\n");
+ nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
stat &= ~0x40000000;
}
if (stat) {
- NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
- nv_wr32(dev, 0x002100, stat);
- nv_wr32(dev, 0x002140, 0);
+ nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, stat);
+ nv_wr32(priv, 0x002140, 0);
}
}
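
The fault (0x10000000) and SUBFIFO (0x20000000) branches above both walk a bitmask of pending units with ffs(), servicing and clearing one bit per pass. The same loop in self-contained form (the mask value is made up):

/* sketch: walking a pending-unit bitmask with ffs(), as nvc0_fifo_intr() does */
#include <strings.h>   /* ffs() */
#include <stdio.h>

int main(void)
{
        unsigned units = 0x00000025;   /* hypothetical: units 0, 2 and 5 pending */
        unsigned u = units;

        while (u) {
                int i = ffs(u) - 1;    /* index of lowest set bit */
                printf("service unit %d\n", i);
                u &= ~(1u << i);       /* clear it and continue */
        }
        return 0;
}
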
+static int
+nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 127, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[0]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x1000, 0,
+ &priv->playlist[1]);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 128 * 0x1000, 0x1000, 0,
+ &priv->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nvc0_fifo_intr;
+ nv_engine(priv)->cclass = &nvc0_fifo_cclass;
+ nv_engine(priv)->sclass = nvc0_fifo_sclass;
+ return 0;
+}
+
static void
-nvc0_fifo_destroy(struct drm_device *dev, int engine)
+nvc0_fifo_dtor(struct nouveau_object *object)
{
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv = (void *)object;
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
-
nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- dev_priv->eng[engine] = NULL;
- kfree(priv);
+ nouveau_fifo_destroy(&priv->base);
}
-int
-nvc0_fifo_create(struct drm_device *dev)
+static int
+nvc0_fifo_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fifo_priv *priv;
- int ret;
+ struct nvc0_fifo_priv *priv = (void *)object;
+ int ret, i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
- priv->base.base.destroy = nvc0_fifo_destroy;
- priv->base.base.init = nvc0_fifo_init;
- priv->base.base.fini = nvc0_fifo_fini;
- priv->base.base.context_new = nvc0_fifo_context_new;
- priv->base.base.context_del = nvc0_fifo_context_del;
- priv->base.channels = 128;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ nv_wr32(priv, 0x002204, 0xffffffff);
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
- if (ret)
- goto error;
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204));
+ nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
- if (ret)
- goto error;
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(priv, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(priv, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4096, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
- if (ret)
- goto error;
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
- ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
- if (ret)
- goto error;
+ nv_mask(priv, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
- nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
+ nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
+ return 0;
}
+
+struct nouveau_oclass
+nvc0_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_fifo_ctor,
+ .dtor = nvc0_fifo_dtor,
+ .init = nvc0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
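
This is the pattern the whole port follows: instead of filling in per-engine hooks at create time, each engine and channel class now exports an oclass whose ofuncs table supplies ctor/dtor/init/fini (plus rd32/wr32 for channels). A toy version of that function-pointer table, with stand-in types rather than the nouveau core ones, just to show the shape:

/* sketch: an ofuncs-style "vtable"; types and callbacks are illustrative only */
#include <stdio.h>

struct ofuncs_sketch {
        int  (*ctor)(void);
        void (*dtor)(void);
        int  (*init)(void);
        int  (*fini)(int suspend);
};

static int  demo_ctor(void)        { puts("ctor"); return 0; }
static void demo_dtor(void)        { puts("dtor"); }
static int  demo_init(void)        { puts("init"); return 0; }
static int  demo_fini(int suspend) { printf("fini(%d)\n", suspend); return 0; }

static const struct ofuncs_sketch demo_ofuncs = {
        .ctor = demo_ctor,
        .dtor = demo_dtor,
        .init = demo_init,
        .fini = demo_fini,
};

int main(void)
{
        /* the core drives an object through these callbacks over its lifetime */
        demo_ofuncs.ctor();
        demo_ofuncs.init();
        demo_ofuncs.fini(0);
        demo_ofuncs.dtor();
        return 0;
}
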
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 0b356f1b6864..aaff086dfd2a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,25 +22,30 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engctx.h>
+#include <core/class.h>
+#include <core/math.h>
+#include <core/enum.h>
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-#include "nouveau_software.h"
-
-#define NVE0_FIFO_ENGINE_NUM 32
+#include <subdev/timer.h>
+#include <subdev/bar.h>
+#include <subdev/vm.h>
-static void nve0_fifo_isr(struct drm_device *);
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
-struct nve0_fifo_engine {
+struct nve0_fifo_engn {
struct nouveau_gpuobj *playlist[2];
int cur_playlist;
};
struct nve0_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+ struct nouveau_fifo base;
+ struct nve0_fifo_engn engine[16];
struct {
struct nouveau_gpuobj *mem;
struct nouveau_vma bar;
@@ -48,194 +53,286 @@ struct nve0_fifo_priv {
int spoon_nr;
};
+struct nve0_fifo_base {
+ struct nouveau_fifo_base base;
+ struct nouveau_gpuobj *pgd;
+ struct nouveau_vm *vm;
+};
+
struct nve0_fifo_chan {
struct nouveau_fifo_chan base;
u32 engine;
};
+/*******************************************************************************
+ * FIFO channel objects
+ ******************************************************************************/
+
static void
-nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
{
- struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nve0_fifo_engine *peng = &priv->engine[engine];
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nve0_fifo_engn *engn = &priv->engine[engine];
struct nouveau_gpuobj *cur;
u32 match = (engine << 16) | 0x00000001;
- int ret, i, p;
+ int i, p;
- cur = peng->playlist[peng->cur_playlist];
+ cur = engn->playlist[engn->cur_playlist];
if (unlikely(cur == NULL)) {
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+ int ret = nouveau_gpuobj_new(nv_object(priv)->parent, NULL,
+ 0x8000, 0x1000, 0, &cur);
if (ret) {
- NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+ nv_error(priv, "playlist alloc failed\n");
return;
}
- peng->playlist[peng->cur_playlist] = cur;
+ engn->playlist[engn->cur_playlist] = cur;
}
- peng->cur_playlist = !peng->cur_playlist;
+ engn->cur_playlist = !engn->cur_playlist;
- for (i = 0, p = 0; i < priv->base.channels; i++) {
- u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+ for (i = 0, p = 0; i < priv->base.max; i++) {
+ u32 ctrl = nv_rd32(priv, 0x800004 + (i * 8)) & 0x001f0001;
if (ctrl != match)
continue;
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000000);
p += 8;
}
- nvimem_flush(dev);
+ bar->flush(bar);
- nv_wr32(dev, 0x002270, cur->addr >> 12);
- nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
- NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+ nv_wr32(priv, 0x002270, cur->addr >> 12);
+ nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ nv_error(priv, "playlist %d update timeout\n", engine);
}
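
nve0 keeps one playlist per engine and double-buffers it: flip cur_playlist, rebuild the idle copy from whatever channels the 0x800004 control words report as runnable on that engine, flush, then point 0x002270/0x002274 at the new list. The rebuild step reduced to plain C (arrays and the "active" flags stand in for the gpuobjs and register reads):

/* sketch: double-buffered playlist rebuild; one word per entry here,
 * whereas the real list writes chid plus a zero word per channel */
#include <stdint.h>
#include <stdio.h>

#define CHAN_NR 8

static uint32_t playlist[2][CHAN_NR];
static int cur_playlist;
static int chan_active[CHAN_NR] = { 1, 0, 1, 0, 0, 1, 0, 0 };

int main(void)
{
        int i, p = 0;

        cur_playlist = !cur_playlist;             /* build into the idle copy */
        for (i = 0; i < CHAN_NR; i++) {
                if (!chan_active[i])
                        continue;
                playlist[cur_playlist][p++] = i;  /* entry: channel id */
        }

        printf("submit playlist %d with %d entries\n", cur_playlist, p);
        return 0;
}
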
static int
-nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+nve0_fifo_context_attach(struct nouveau_object *parent,
+ struct nouveau_object *object)
{
- struct drm_device *dev = chan->dev;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- struct nve0_fifo_chan *fctx;
- u64 usermem = priv->user.mem->addr + chan->id * 512;
- u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret = 0, i;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->engine = 0; /* PGRAPH */
-
- /* allocate vram for control regs, map into polling area */
- chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
- priv->user.bar.offset + (chan->id * 512), 512);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_base *base = (void *)parent->parent;
+ struct nouveau_engctx *ectx = (void *)object;
+ u32 addr;
+ int ret;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ default:
+ return -EINVAL;
}
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
- nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
- nv_wo32(chan->ramin, 0x10, 0x0000face);
- nv_wo32(chan->ramin, 0x30, 0xfffff902);
- nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
- upper_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x84, 0x20400000);
- nv_wo32(chan->ramin, 0x94, 0x30000001);
- nv_wo32(chan->ramin, 0x9c, 0x00000100);
- nv_wo32(chan->ramin, 0xac, 0x0000001f);
- nv_wo32(chan->ramin, 0xe4, 0x00000000);
- nv_wo32(chan->ramin, 0xe8, chan->id);
- nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
- nvimem_flush(dev);
-
- nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
- (chan->ramin->addr >> 12));
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
+ if (!ectx->vma.node) {
+ ret = nouveau_gpuobj_map_vm(nv_gpuobj(ectx), base->vm,
+ NV_MEM_ACCESS_RW, &ectx->vma);
+ if (ret)
+ return ret;
+ }
+
+ nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
+ nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
+ bar->flush(bar);
+ return 0;
}
-static void
-nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+static int
+nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
+ struct nouveau_object *object)
{
- struct nve0_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
-
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
- nv_wr32(dev, 0x002634, chan->id);
- if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
- NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
-
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_priv *priv = (void *)parent->engine;
+ struct nve0_fifo_base *base = (void *)parent->parent;
+ struct nve0_fifo_chan *chan = (void *)parent;
+ u32 addr;
+
+ switch (nv_engidx(object->engine)) {
+ case NVDEV_ENGINE_SW : return 0;
+ case NVDEV_ENGINE_GR : addr = 0x0210; break;
+ default:
+ return -EINVAL;
+ }
+
+ nv_wo32(base, addr + 0x00, 0x00000000);
+ nv_wo32(base, addr + 0x04, 0x00000000);
+ bar->flush(bar);
+
+ nv_wr32(priv, 0x002634, chan->base.chid);
+ if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
+ nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
+ if (suspend)
+ return -EBUSY;
}
- chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
- kfree(fctx);
+ return 0;
}
static int
-nve0_fifo_init(struct drm_device *dev, int engine)
+nve0_fifo_chan_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- struct nve0_fifo_chan *fctx;
- int i;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nve0_fifo_priv *priv = (void *)engine;
+ struct nve0_fifo_base *base = (void *)parent;
+ struct nve0_fifo_chan *chan;
+ struct nv_channel_ind_class *args = data;
+ u64 usermem, ioffset, ilength;
+ int ret, i;
+
+ if (size < sizeof(*args))
+ return -EINVAL;
+
+ ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
+ priv->user.bar.offset, 0x200,
+ args->pushbuf,
+ (1 << NVDEV_ENGINE_SW) |
+ (1 << NVDEV_ENGINE_GR), &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ nv_parent(chan)->context_attach = nve0_fifo_context_attach;
+ nv_parent(chan)->context_detach = nve0_fifo_context_detach;
+
+ usermem = chan->base.chid * 0x200;
+ ioffset = args->ioffset;
+ ilength = log2i(args->ilength / 8);
+
+ for (i = 0; i < 0x200; i += 4)
+ nv_wo32(priv->user.mem, usermem + i, 0x00000000);
+
+ nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem));
+ nv_wo32(base, 0x10, 0x0000face);
+ nv_wo32(base, 0x30, 0xfffff902);
+ nv_wo32(base, 0x48, lower_32_bits(ioffset));
+ nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
+ nv_wo32(base, 0x84, 0x20400000);
+ nv_wo32(base, 0x94, 0x30000001);
+ nv_wo32(base, 0x9c, 0x00000100);
+ nv_wo32(base, 0xac, 0x0000001f);
+ nv_wo32(base, 0xe8, chan->base.chid);
+ nv_wo32(base, 0xb8, 0xf8000000);
+ nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
+ bar->flush(bar);
+ return 0;
+}
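
The channel constructor above packs the userspace indirect-buffer description into the instance block: the IB base goes in at 0x48/0x4c, with log2 of the entry count (ilength / 8, since each IB entry is 8 bytes) folded into the upper half of 0x4c. A standalone sketch of that packing; ilog2_sketch() is a local stand-in for the kernel's log2i() and the values are invented:

/* sketch: packing the 0x48/0x4c indirect-buffer words as nve0_fifo_chan_ctor() does */
#include <stdint.h>
#include <stdio.h>

static unsigned ilog2_sketch(uint64_t v)
{
        unsigned l = 0;
        while (v >>= 1)
                l++;
        return l;
}

int main(void)
{
        uint64_t ioffset = 0x0000000020001000ull;  /* hypothetical push buffer VA */
        uint64_t ilength = 0x10000;                /* hypothetical IB size, bytes */
        unsigned order   = ilog2_sketch(ilength / 8);

        printf("0x48 <- 0x%08x\n", (uint32_t)ioffset);
        printf("0x4c <- 0x%08x\n", (uint32_t)(ioffset >> 32) | (order << 16));
        return 0;
}
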
- /* reset PFIFO, enable all available PSUBFIFO areas */
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x000204, 0xffffffff);
+static int
+nve0_fifo_chan_init(struct nouveau_object *object)
+{
+ struct nouveau_gpuobj *base = nv_gpuobj(object->parent);
+ struct nve0_fifo_priv *priv = (void *)object->engine;
+ struct nve0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
+ int ret;
- priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
- NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+ ret = nouveau_fifo_channel_init(&chan->base);
+ if (ret)
+ return ret;
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
- }
+ nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(priv, chan->engine);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400);
+ return 0;
+}
- nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+static int
+nve0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nve0_fifo_priv *priv = (void *)object->engine;
+ struct nve0_fifo_chan *chan = (void *)object;
+ u32 chid = chan->base.chid;
- nv_wr32(dev, 0x002a00, 0xffffffff);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xbfffffff);
+ nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800);
+ nve0_fifo_playlist_update(priv, chan->engine);
+ nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000);
- /* restore PFIFO context table */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (!chan || !(fctx = chan->engctx[engine]))
- continue;
+ return nouveau_fifo_channel_fini(&chan->base, suspend);
+}
- nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
- (chan->ramin->addr >> 12));
- nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
- }
+static struct nouveau_ofuncs
+nve0_fifo_ofuncs = {
+ .ctor = nve0_fifo_chan_ctor,
+ .dtor = _nouveau_fifo_channel_dtor,
+ .init = nve0_fifo_chan_init,
+ .fini = nve0_fifo_chan_fini,
+ .rd32 = _nouveau_fifo_channel_rd32,
+ .wr32 = _nouveau_fifo_channel_wr32,
+};
- return 0;
-}
+static struct nouveau_oclass
+nve0_fifo_sclass[] = {
+ { 0xa06f, &nve0_fifo_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * FIFO context - instmem heap and vm setup
+ ******************************************************************************/
static int
-nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+nve0_fifo_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- int i;
+ struct nve0_fifo_base *base;
+ int ret;
- for (i = 0; i < priv->base.channels; i++) {
- if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
- continue;
+ ret = nouveau_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &base);
+ *pobject = nv_object(base);
+ if (ret)
+ return ret;
- nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
+ ret = nouveau_gpuobj_new(parent, NULL, 0x10000, 0x1000, 0, &base->pgd);
+ if (ret)
+ return ret;
+
+ nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
+ nv_wo32(base, 0x0208, 0xffffffff);
+ nv_wo32(base, 0x020c, 0x000000ff);
+
+ ret = nouveau_vm_ref(nouveau_client(parent)->vm, &base->vm, base->pgd);
+ if (ret)
+ return ret;
- nv_wr32(dev, 0x002140, 0x00000000);
return 0;
}
+static void
+nve0_fifo_context_dtor(struct nouveau_object *object)
+{
+ struct nve0_fifo_base *base = (void *)object;
+ nouveau_vm_ref(NULL, &base->vm, base->pgd);
+ nouveau_gpuobj_ref(NULL, &base->pgd);
+ nouveau_fifo_context_destroy(&base->base);
+}
+
+static struct nouveau_oclass
+nve0_fifo_cclass = {
+ .handle = NV_ENGCTX(FIFO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_context_ctor,
+ .dtor = nve0_fifo_context_dtor,
+ .init = _nouveau_fifo_context_init,
+ .fini = _nouveau_fifo_context_fini,
+ .rd32 = _nouveau_fifo_context_rd32,
+ .wr32 = _nouveau_fifo_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PFIFO engine
+ ******************************************************************************/
+
struct nouveau_enum nve0_fifo_fault_unit[] = {
{}
};
@@ -268,16 +365,16 @@ struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
};
static void
-nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
{
- u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
- u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
- u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
- u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 inst = nv_rd32(priv, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(priv, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
u32 client = (stat & 0x00001f00) >> 8;
- NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
- (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
+ "write" : "read", (u64)vahi << 32 | valo);
nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
printk("] from ");
nouveau_enum_print(nve0_fifo_fault_unit, unit);
@@ -292,160 +389,205 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
}
static int
-nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+nve0_fifo_swmthd(struct nve0_fifo_priv *priv, u32 chid, u32 mthd, u32 data)
{
- struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
+ struct nve0_fifo_chan *chan = NULL;
+ struct nouveau_handle *bind;
unsigned long flags;
int ret = -EINVAL;
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < priv->base.channels)) {
- chan = dev_priv->channels.ptr[chid];
- if (likely(chan)) {
- struct nouveau_software_chan *swch =
- chan->engctx[NVOBJ_ENGINE_SW];
- ret = swch->flip(swch->flip_data);
- }
+ spin_lock_irqsave(&priv->base.lock, flags);
+ if (likely(chid >= priv->base.min && chid <= priv->base.max))
+ chan = (void *)priv->base.channel[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ bind = nouveau_namedb_get_class(nv_namedb(chan), 0x906e);
+ if (likely(bind)) {
+ if (!mthd || !nv_call(bind->object, mthd, data))
+ ret = 0;
+ nouveau_namedb_put(bind);
}
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+out:
+ spin_unlock_irqrestore(&priv->base.lock, flags);
return ret;
}
static void
-nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
{
- u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
- u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0xfff;
- u32 subc = (addr & 0x00070000);
+ u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+ u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00003ffc);
u32 show = stat;
if (stat & 0x00200000) {
if (mthd == 0x0054) {
- if (!nve0_fifo_page_flip(dev, chid))
+ if (!nve0_fifo_swmthd(priv, chid, 0x0500, 0x00000000))
show &= ~0x00200000;
}
}
+ if (stat & 0x00800000) {
+ if (!nve0_fifo_swmthd(priv, chid, mthd, data))
+ show &= ~0x00800000;
+ }
+
if (show) {
- NV_INFO(dev, "PFIFO%d:", unit);
+ nv_error(priv, "SUBFIFO%d:", unit);
nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
- NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
+ printk("\n");
+ nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x "
+ "data 0x%08x\n",
+ unit, chid, subc, mthd, data);
}
- nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
- nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+ nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
}
static void
-nve0_fifo_isr(struct drm_device *dev)
+nve0_fifo_intr(struct nouveau_subdev *subdev)
{
- u32 mask = nv_rd32(dev, 0x002140);
- u32 stat = nv_rd32(dev, 0x002100) & mask;
+ struct nve0_fifo_priv *priv = (void *)subdev;
+ u32 mask = nv_rd32(priv, 0x002140);
+ u32 stat = nv_rd32(priv, 0x002100) & mask;
if (stat & 0x00000100) {
- NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
- nv_wr32(dev, 0x002100, 0x00000100);
+ nv_warn(priv, "unknown status 0x00000100\n");
+ nv_wr32(priv, 0x002100, 0x00000100);
stat &= ~0x00000100;
}
if (stat & 0x10000000) {
- u32 units = nv_rd32(dev, 0x00259c);
+ u32 units = nv_rd32(priv, 0x00259c);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nve0_fifo_isr_vm_fault(dev, i);
+ nve0_fifo_isr_vm_fault(priv, i);
u &= ~(1 << i);
}
- nv_wr32(dev, 0x00259c, units);
+ nv_wr32(priv, 0x00259c, units);
stat &= ~0x10000000;
}
if (stat & 0x20000000) {
- u32 units = nv_rd32(dev, 0x0025a0);
+ u32 units = nv_rd32(priv, 0x0025a0);
u32 u = units;
while (u) {
int i = ffs(u) - 1;
- nve0_fifo_isr_subfifo_intr(dev, i);
+ nve0_fifo_isr_subfifo_intr(priv, i);
u &= ~(1 << i);
}
- nv_wr32(dev, 0x0025a0, units);
+ nv_wr32(priv, 0x0025a0, units);
stat &= ~0x20000000;
}
if (stat & 0x40000000) {
- NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
- nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ nv_warn(priv, "unknown status 0x40000000\n");
+ nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
stat &= ~0x40000000;
}
if (stat) {
- NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
- nv_wr32(dev, 0x002100, stat);
- nv_wr32(dev, 0x002140, 0);
+ nv_fatal(priv, "unhandled status 0x%08x\n", stat);
+ nv_wr32(priv, 0x002100, stat);
+ nv_wr32(priv, 0x002140, 0);
}
}
+static int
+nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ ret = nouveau_fifo_create(parent, engine, oclass, 0, 4095, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 4096 * 0x200, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW,
+ &priv->user.bar);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000100;
+ nv_subdev(priv)->intr = nve0_fifo_intr;
+ nv_engine(priv)->cclass = &nve0_fifo_cclass;
+ nv_engine(priv)->sclass = nve0_fifo_sclass;
+ return 0;
+}
+
static void
-nve0_fifo_destroy(struct drm_device *dev, int engine)
+nve0_fifo_dtor(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_priv *priv = (void *)object;
int i;
nouveau_gpuobj_unmap(&priv->user.bar);
nouveau_gpuobj_ref(NULL, &priv->user.mem);
- for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ for (i = 0; i < ARRAY_SIZE(priv->engine); i++) {
nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
}
- dev_priv->eng[engine] = NULL;
- kfree(priv);
+ nouveau_fifo_destroy(&priv->base);
}
-int
-nve0_fifo_create(struct drm_device *dev)
+static int
+nve0_fifo_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv;
- int ret;
+ struct nve0_fifo_priv *priv = (void *)object;
+ int ret, i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ ret = nouveau_fifo_init(&priv->base);
+ if (ret)
+ return ret;
- priv->base.base.destroy = nve0_fifo_destroy;
- priv->base.base.init = nve0_fifo_init;
- priv->base.base.fini = nve0_fifo_fini;
- priv->base.base.context_new = nve0_fifo_context_new;
- priv->base.base.context_del = nve0_fifo_context_del;
- priv->base.channels = 4096;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+ /* enable all available PSUBFIFOs */
+ nv_wr32(priv, 0x000204, 0xffffffff);
+ priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204));
+ nv_debug(priv, "%d subfifo(s)\n", priv->spoon_nr);
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
- if (ret)
- goto error;
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
+ }
- ret = nouveau_gpuobj_map_bar(priv->user.mem, NV_MEM_ACCESS_RW,
- &priv->user.bar);
- if (ret)
- goto error;
+ nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
- nouveau_irq_register(dev, 8, nve0_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
+ nv_wr32(priv, 0x002a00, 0xffffffff);
+ nv_wr32(priv, 0x002100, 0xffffffff);
+ nv_wr32(priv, 0x002140, 0xbfffffff);
+ return 0;
}
+
+struct nouveau_oclass
+nve0_fifo_oclass = {
+ .handle = NV_ENGINE(FIFO, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nve0_fifo_ctor,
+ .dtor = nve0_fifo_dtor,
+ .init = nve0_fifo_init,
+ .fini = _nouveau_fifo_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
index b0795ececbda..e1947013d3bc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctx.h
@@ -2,7 +2,7 @@
#define __NOUVEAU_GRCTX_H__
struct nouveau_grctx {
- struct drm_device *dev;
+ struct nouveau_device *device;
enum {
NOUVEAU_GRCTX_PROG,
@@ -10,18 +10,18 @@ struct nouveau_grctx {
} mode;
void *data;
- uint32_t ctxprog_max;
- uint32_t ctxprog_len;
- uint32_t ctxprog_reg;
- int ctxprog_label[32];
- uint32_t ctxvals_pos;
- uint32_t ctxvals_base;
+ u32 ctxprog_max;
+ u32 ctxprog_len;
+ u32 ctxprog_reg;
+ int ctxprog_label[32];
+ u32 ctxvals_pos;
+ u32 ctxvals_base;
};
static inline void
-cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+cp_out(struct nouveau_grctx *ctx, u32 inst)
{
- uint32_t *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->data;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
return;
@@ -31,13 +31,13 @@ cp_out(struct nouveau_grctx *ctx, uint32_t inst)
}
static inline void
-cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+cp_lsr(struct nouveau_grctx *ctx, u32 val)
{
cp_out(ctx, CP_LOAD_SR | val);
}
static inline void
-cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+cp_ctx(struct nouveau_grctx *ctx, u32 reg, u32 length)
{
ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
@@ -55,7 +55,7 @@ cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
static inline void
cp_name(struct nouveau_grctx *ctx, int name)
{
- uint32_t *ctxprog = ctx->data;
+ u32 *ctxprog = ctx->data;
int i;
if (ctx->mode != NOUVEAU_GRCTX_PROG)
@@ -115,7 +115,7 @@ cp_pos(struct nouveau_grctx *ctx, int offset)
}
static inline void
-gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+gr_def(struct nouveau_grctx *ctx, u32 reg, u32 val)
{
if (ctx->mode != NOUVEAU_GRCTX_VALS)
return;
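
The cp_out()/cp_lsr()/cp_ctx() helpers above (now taking plain u32s) all reduce to "append an opcode word to ctx->data and bump ctxprog_len, bounds-checked against ctxprog_max". A stripped-down standalone model of that emitter (the opcode values are arbitrary):

/* sketch: a minimal append-to-buffer ctxprog emitter */
#include <stdint.h>
#include <stdio.h>

struct prog_sketch {
        uint32_t buf[32];
        uint32_t len;
        uint32_t max;
};

static void emit(struct prog_sketch *p, uint32_t inst)
{
        if (p->len >= p->max) {
                fprintf(stderr, "ctxprog too large\n");
                return;
        }
        p->buf[p->len++] = inst;
}

int main(void)
{
        struct prog_sketch p = { .max = 32 };
        unsigned i;

        emit(&p, 0x00c000ff);   /* opcode word (arbitrary example value) */
        emit(&p, 0x00300000);   /* payload word (arbitrary example value) */

        for (i = 0; i < p.len; i++)
                printf("%08x\n", p.buf[i]);
        return 0;
}
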
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index b17506d7eb60..e45035efb8ca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -22,6 +22,8 @@
* Authors: Ben Skeggs
*/
+#include <core/gpuobj.h>
+
/* NVIDIA context programs handle a number of other conditions which are
* not implemented in our versions. It's not clear why NVIDIA context
* programs have this code, nor whether it's strictly necessary for
@@ -109,8 +111,7 @@
#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nv40.h"
#include "ctx.h"
/* TODO:
@@ -118,11 +119,10 @@
*/
static int
-nv40_graph_vs_count(struct drm_device *dev)
+nv40_graph_vs_count(struct nouveau_device *device)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -160,7 +160,7 @@ enum cp_label {
static void
nv40_graph_construct_general(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x4000a4, 1);
@@ -187,7 +187,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x400724, 1);
gr_def(ctx, 0x400724, 0x02008821);
cp_ctx(ctx, 0x400770, 3);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x400814, 4);
cp_ctx(ctx, 0x400828, 5);
cp_ctx(ctx, 0x400840, 5);
@@ -208,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x4009dc, 0x80000000);
} else {
cp_ctx(ctx, 0x400840, 20);
- if (nv44_graph_class(ctx->dev)) {
+ if (nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
}
@@ -217,21 +217,21 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400888, 0x00000040);
cp_ctx(ctx, 0x400894, 11);
gr_def(ctx, 0x400894, 0x00000040);
- if (!nv44_graph_class(ctx->dev)) {
+ if (!nv44_graph_class(ctx->device)) {
for (i = 0; i < 8; i++)
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
}
cp_ctx(ctx, 0x4008e0, 2);
cp_ctx(ctx, 0x4008f8, 2);
- if (dev_priv->chipset == 0x4c ||
- (dev_priv->chipset & 0xf0) == 0x60)
+ if (device->chipset == 0x4c ||
+ (device->chipset & 0xf0) == 0x60)
cp_ctx(ctx, 0x4009f8, 1);
}
cp_ctx(ctx, 0x400a00, 73);
gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
cp_ctx(ctx, 0x401000, 4);
cp_ctx(ctx, 0x405004, 1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -240,7 +240,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x403440, 1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x403440, 0x00000010);
break;
@@ -266,19 +266,19 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401880, 51);
gr_def(ctx, 0x401940, 0x00000100);
} else
- if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ if (device->chipset == 0x46 || device->chipset == 0x47 ||
+ device->chipset == 0x49 || device->chipset == 0x4b) {
cp_ctx(ctx, 0x401880, 32);
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
- if (dev_priv->chipset == 0x46)
+ if (device->chipset == 0x46)
cp_ctx(ctx, 0x401900, 16);
cp_ctx(ctx, 0x401940, 3);
}
@@ -289,7 +289,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401978, 0xffff0000);
gr_def(ctx, 0x40197c, 0x00000001);
gr_def(ctx, 0x401990, 0x46400000);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x4019a0, 2);
cp_ctx(ctx, 0x4019ac, 5);
} else {
@@ -297,7 +297,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x4019b4, 3);
}
gr_def(ctx, 0x4019bc, 0xffff0000);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@@ -316,7 +316,7 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
for (i = 0; i < 16; i++)
gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
gr_def(ctx, 0x401a8c, 0x4b7fffff);
- if (dev_priv->chipset == 0x40) {
+ if (device->chipset == 0x40) {
cp_ctx(ctx, 0x401ab8, 3);
} else {
cp_ctx(ctx, 0x401ab8, 1);
@@ -327,10 +327,10 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401ad4, 0x70605040);
gr_def(ctx, 0x401ad8, 0xb8a89888);
gr_def(ctx, 0x401adc, 0xf8e8d8c8);
- cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1);
gr_def(ctx, 0x401b10, 0x40100000);
- cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
- gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, device->chipset == 0x40 ?
0x00000004 : 0x00000000);
cp_ctx(ctx, 0x401b30, 25);
gr_def(ctx, 0x401b34, 0x0000ffff);
@@ -341,8 +341,8 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
gr_def(ctx, 0x401b84, 0xffffffff);
gr_def(ctx, 0x401b88, 0x00ff7000);
gr_def(ctx, 0x401b8c, 0x0000ffff);
- if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
- dev_priv->chipset != 0x4e)
+ if (device->chipset != 0x44 && device->chipset != 0x4a &&
+ device->chipset != 0x4e)
cp_ctx(ctx, 0x401b94, 1);
cp_ctx(ctx, 0x401b98, 8);
gr_def(ctx, 0x401b9c, 0x00ff0000);
@@ -371,12 +371,12 @@ nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
cp_ctx(ctx, 0x402000, 1);
- cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
- switch (dev_priv->chipset) {
+ cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2);
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402404, 0x00000001);
break;
@@ -393,9 +393,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
gr_def(ctx, 0x402404, 0x00000021);
}
- if (dev_priv->chipset != 0x40)
+ if (device->chipset != 0x40)
gr_def(ctx, 0x402408, 0x030c30c3);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x44:
case 0x46:
case 0x4a:
@@ -408,10 +408,10 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
default:
break;
}
- cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9);
gr_def(ctx, 0x402488, 0x3e020200);
gr_def(ctx, 0x40248c, 0x00ffffff);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x40:
gr_def(ctx, 0x402490, 0x60103f00);
break;
@@ -428,16 +428,16 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402490, 0x0c103f00);
break;
}
- gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ gr_def(ctx, 0x40249c, device->chipset <= 0x43 ?
0x00020000 : 0x00040000);
cp_ctx(ctx, 0x402500, 31);
gr_def(ctx, 0x402530, 0x00008100);
- if (dev_priv->chipset == 0x40)
+ if (device->chipset == 0x40)
cp_ctx(ctx, 0x40257c, 6);
cp_ctx(ctx, 0x402594, 16);
cp_ctx(ctx, 0x402800, 17);
gr_def(ctx, 0x402800, 0x00000001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -445,7 +445,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x402864, 0x00001001);
cp_ctx(ctx, 0x402870, 3);
gr_def(ctx, 0x402878, 0x00000003);
- if (dev_priv->chipset != 0x47) { /* belong at end!! */
+ if (device->chipset != 0x47) { /* belong at end!! */
cp_ctx(ctx, 0x402900, 1);
cp_ctx(ctx, 0x402940, 1);
cp_ctx(ctx, 0x402980, 1);
@@ -470,9 +470,9 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
}
cp_ctx(ctx, 0x402c00, 4);
- gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ gr_def(ctx, 0x402c00, device->chipset == 0x40 ?
0x80800001 : 0x00888001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x47:
case 0x49:
case 0x4b:
@@ -485,30 +485,30 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
break;
default:
cp_ctx(ctx, 0x402c10, 4);
- if (dev_priv->chipset == 0x40)
+ if (device->chipset == 0x40)
cp_ctx(ctx, 0x402c20, 36);
else
- if (dev_priv->chipset <= 0x42)
+ if (device->chipset <= 0x42)
cp_ctx(ctx, 0x402c20, 24);
else
- if (dev_priv->chipset <= 0x4a)
+ if (device->chipset <= 0x4a)
cp_ctx(ctx, 0x402c20, 16);
else
cp_ctx(ctx, 0x402c20, 8);
- cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13);
gr_def(ctx, 0x402cd4, 0x00000005);
- if (dev_priv->chipset != 0x40)
+ if (device->chipset != 0x40)
gr_def(ctx, 0x402ce0, 0x0000ffff);
break;
}
- cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
- for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->device));
+ for (i = 0; i < nv40_graph_vs_count(ctx->device); i++)
gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
- if (dev_priv->chipset != 0x40) {
+ if (device->chipset != 0x40) {
cp_ctx(ctx, 0x403600, 1);
gr_def(ctx, 0x403600, 0x00000001);
}
@@ -516,7 +516,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x403c18, 1);
gr_def(ctx, 0x403c18, 0x00000001);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x46:
case 0x47:
case 0x49:
@@ -527,7 +527,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
gr_def(ctx, 0x405c24, 0x000e3000);
break;
}
- if (dev_priv->chipset != 0x4e)
+ if (device->chipset != 0x4e)
cp_ctx(ctx, 0x405800, 11);
cp_ctx(ctx, 0x407000, 1);
}
@@ -535,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
{
- int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
+ int len = nv44_graph_class(ctx->device) ? 0x0084 : 0x0684;
cp_out (ctx, 0x300000);
cp_lsr (ctx, len - 4);
@@ -550,32 +550,31 @@ nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
- struct drm_device *dev = ctx->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_device *device = ctx->device;
struct nouveau_gpuobj *obj = ctx->data;
int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
int offset, i;
- vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr = nv40_graph_vs_count(ctx->device);
vs_nr_b0 = 363;
- vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
- if (dev_priv->chipset == 0x40) {
+ vs_nr_b1 = device->chipset == 0x40 ? 128 : 64;
+ if (device->chipset == 0x40) {
b0_offset = 0x2200/4; /* 33a0 */
b1_offset = 0x55a0/4; /* 1500 */
vs_len = 0x6aa0/4;
} else
- if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ if (device->chipset == 0x41 || device->chipset == 0x42) {
b0_offset = 0x2200/4; /* 2200 */
b1_offset = 0x4400/4; /* 0b00 */
vs_len = 0x4f00/4;
} else {
b0_offset = 0x1d40/4; /* 2200 */
b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
+ vs_len = nv44_graph_class(device) ? 0x4980/4 : 0x4a40/4;
}
cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
+ cp_out(ctx, nv44_graph_class(device) ? 0x800029 : 0x800041);
offset = ctx->ctxvals_pos;
ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -661,21 +660,21 @@ nv40_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
{
nv40_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_VALS,
.data = mem,
});
}
void
-nv40_grctx_init(struct drm_device *dev, u32 *size)
+nv40_grctx_init(struct nouveau_device *device, u32 *size)
{
u32 ctxprog[256], i;
struct nouveau_grctx ctx = {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_PROG,
.data = ctxprog,
.ctxprog_max = ARRAY_SIZE(ctxprog)
@@ -683,8 +682,8 @@ nv40_grctx_init(struct drm_device *dev, u32 *size)
nv40_grctx_generate(&ctx);
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ nv_wr32(device, 0x400324, 0);
for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ nv_wr32(device, 0x400328, ctxprog[i]);
*size = ctx.ctxvals_pos * 4;
}
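
nv40_grctx_init() now generates the ctxprog into a local array and uploads it through the 0x400324/0x400328 pair (the registers previously referred to by the NV40_PGRAPH_CTXCTL_UCODE_* names). A sketch of that upload pattern, under the assumption that the index register advances as data words are written; wr32_sketch() just prints instead of touching hardware, and the program words are invented:

/* sketch: index/data style ucode upload, modelled on the loop above */
#include <stdint.h>
#include <stdio.h>

#define UCODE_INDEX 0x400324
#define UCODE_DATA  0x400328

static void wr32_sketch(uint32_t reg, uint32_t val)
{
        printf("wr32 0x%06x <- 0x%08x\n", reg, val);
}

int main(void)
{
        uint32_t ctxprog[4] = { 0x00a00003, 0x00600005, 0x00c000ff, 0x0080000b };
        unsigned i;

        wr32_sketch(UCODE_INDEX, 0);                 /* reset upload position */
        for (i = 0; i < 4; i++)
                wr32_sketch(UCODE_DATA, ctxprog[i]); /* stream program words */
        return 0;
}
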
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index e17c17bfd89e..552fdbd45ebe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <core/gpuobj.h>
+
#define CP_FLAG_CLEAR 0
#define CP_FLAG_SET 1
#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
@@ -105,8 +107,7 @@
#define CP_SEEK_1 0x00c000ff
#define CP_SEEK_2 0x00c800ff
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nv50.h"
#include "ctx.h"
#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
@@ -175,32 +176,6 @@ static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
static int
nv50_grctx_generate(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- case 0xa0:
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- break;
- default:
- NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
- "your NV%x card.\n", dev_priv->chipset);
- NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
- "the devs.\n");
- return -ENOSYS;
- }
-
cp_set (ctx, STATE, RUNNING);
cp_set (ctx, XFER_SWITCH, ENABLE);
/* decide whether we're loading/unloading the context */
@@ -278,30 +253,36 @@ nv50_grctx_generate(struct nouveau_grctx *ctx)
}
void
-nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+nv50_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
{
nv50_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_VALS,
.data = mem,
});
}
int
-nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+nv50_grctx_init(struct nouveau_device *device, u32 *size)
{
+ u32 *ctxprog = kmalloc(512 * 4, GFP_KERNEL), i;
struct nouveau_grctx ctx = {
- .dev = dev,
+ .device = device,
.mode = NOUVEAU_GRCTX_PROG,
- .data = data,
- .ctxprog_max = max
+ .data = ctxprog,
+ .ctxprog_max = 512,
};
- int ret;
- ret = nv50_grctx_generate(&ctx);
- *cnt = ctx.ctxvals_pos * 4;
- *len = ctx.ctxprog_len;
- return ret;
+ if (!ctxprog)
+ return -ENOMEM;
+ nv50_grctx_generate(&ctx);
+
+ nv_wr32(device, 0x400324, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(device, 0x400328, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+ kfree(ctxprog);
+ return 0;
}
/*
@@ -315,36 +296,36 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
static void
nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i, j;
int offset, base;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 units = nv_rd32 (ctx->device, 0x1540);
/* 0800: DISPATCH */
cp_ctx(ctx, 0x400808, 7);
gr_def(ctx, 0x400814, 0x00000030);
cp_ctx(ctx, 0x400834, 0x32);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
gr_def(ctx, 0x400834, 0xff400040);
gr_def(ctx, 0x400838, 0xfff00080);
gr_def(ctx, 0x40083c, 0xfff70090);
gr_def(ctx, 0x400840, 0xffe806a8);
}
gr_def(ctx, 0x400844, 0x00000002);
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
gr_def(ctx, 0x400894, 0x00001000);
gr_def(ctx, 0x4008e8, 0x00000003);
gr_def(ctx, 0x4008ec, 0x00001000);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x400908, 0xb);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
cp_ctx(ctx, 0x400908, 0xc);
else
cp_ctx(ctx, 0x400908, 0xe);
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
cp_ctx(ctx, 0x400b00, 0x1);
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
cp_ctx(ctx, 0x400b10, 0x1);
gr_def(ctx, 0x400b10, 0x0001629d);
cp_ctx(ctx, 0x400b20, 0x1);
@@ -358,10 +339,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400c08, 0x0000fe0c);
/* 1000 */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x401008, 0x4);
gr_def(ctx, 0x401014, 0x00001000);
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
cp_ctx(ctx, 0x401008, 0x5);
gr_def(ctx, 0x401018, 0x00001000);
} else {
@@ -372,7 +353,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1400 */
cp_ctx(ctx, 0x401400, 0x8);
cp_ctx(ctx, 0x401424, 0x3);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x40142c, 0x0001fd87);
else
gr_def(ctx, 0x40142c, 0x00000187);
@@ -382,10 +363,10 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1800: STREAMOUT */
cp_ctx(ctx, 0x401814, 0x1);
gr_def(ctx, 0x401814, 0x000000ff);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, 0x40181c, 0xe);
gr_def(ctx, 0x401850, 0x00000004);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x40181c, 0xf);
gr_def(ctx, 0x401854, 0x00000004);
} else {
@@ -395,7 +376,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 1C00 */
cp_ctx(ctx, 0x401c00, 0x1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, 0x401c00, 0x0001005f);
break;
@@ -424,7 +405,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 2400 */
cp_ctx(ctx, 0x402400, 0x1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x402408, 0x1);
else
cp_ctx(ctx, 0x402408, 0x2);
@@ -432,21 +413,21 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 2800: CSCHED */
cp_ctx(ctx, 0x402800, 0x1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x402800, 0x00000006);
/* 2C00: ZCULL */
cp_ctx(ctx, 0x402c08, 0x6);
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
gr_def(ctx, 0x402c14, 0x01000000);
gr_def(ctx, 0x402c18, 0x000000ff);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x402ca0, 0x1);
else
cp_ctx(ctx, 0x402ca0, 0x2);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
gr_def(ctx, 0x402ca0, 0x00000400);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
gr_def(ctx, 0x402ca0, 0x00000800);
else
gr_def(ctx, 0x402ca0, 0x00000400);
@@ -457,14 +438,14 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x403004, 0x00000001);
/* 3400 */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, 0x403404, 0x1);
gr_def(ctx, 0x403404, 0x00000001);
}
/* 5000: CCACHE */
cp_ctx(ctx, 0x405000, 0x1);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, 0x405000, 0x00300080);
break;
@@ -493,22 +474,22 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x40502c, 0x1);
/* 6000? */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
/* 6800: M2MF */
- if (dev_priv->chipset < 0x90) {
+ if (device->chipset < 0x90) {
cp_ctx(ctx, 0x406814, 0x2b);
gr_def(ctx, 0x406818, 0x00000f80);
gr_def(ctx, 0x406860, 0x007f0080);
gr_def(ctx, 0x40689c, 0x007f0080);
} else {
cp_ctx(ctx, 0x406814, 0x4);
- if (dev_priv->chipset == 0x98)
+ if (device->chipset == 0x98)
gr_def(ctx, 0x406818, 0x00000f80);
else
gr_def(ctx, 0x406818, 0x00001f80);
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
gr_def(ctx, 0x40681c, 0x00000030);
cp_ctx(ctx, 0x406830, 0x3);
}
@@ -517,43 +498,43 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
for (i = 0; i < 8; i++) {
if (units & (1<<(i+16))) {
cp_ctx(ctx, 0x407000 + (i<<8), 3);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
- else if (dev_priv->chipset != 0xa5)
+ else if (device->chipset != 0xa5)
gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
else
gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, 0x407010 + (i<<8), 1);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x407010 + (i<<8), 2);
gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
} else {
cp_ctx(ctx, 0x407010 + (i<<8), 3);
gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
- if (dev_priv->chipset != 0xa5)
+ if (device->chipset != 0xa5)
gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
else
gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
}
cp_ctx(ctx, 0x407080 + (i<<8), 4);
- if (dev_priv->chipset != 0xa5)
+ if (device->chipset != 0xa5)
gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
else
gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
else
gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, 0x407094 + (i<<8), 1);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
cp_ctx(ctx, 0x407094 + (i<<8), 3);
else {
cp_ctx(ctx, 0x407094 + (i<<8), 4);
@@ -563,30 +544,30 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
cp_ctx(ctx, 0x407c00, 0x3);
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
gr_def(ctx, 0x407c00, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, 0x407c00, 0x00390040);
else
gr_def(ctx, 0x407c00, 0x003d0040);
gr_def(ctx, 0x407c08, 0x00000022);
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, 0x407c10, 0x3);
cp_ctx(ctx, 0x407c20, 0x1);
cp_ctx(ctx, 0x407c2c, 0x1);
}
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, 0x407d00, 0x9);
} else {
cp_ctx(ctx, 0x407d00, 0x15);
}
- if (dev_priv->chipset == 0x98)
+ if (device->chipset == 0x98)
gr_def(ctx, 0x407d08, 0x00380040);
else {
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
gr_def(ctx, 0x407d08, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, 0x407d08, 0x00390040);
else
gr_def(ctx, 0x407d08, 0x003d0040);
@@ -596,11 +577,11 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
/* 8000+: per-TP state */
for (i = 0; i < 10; i++) {
if (units & (1<<i)) {
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
base = 0x408000 + (i<<12);
else
base = 0x408000 + (i<<11);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xc00;
else
offset = base + 0x80;
@@ -609,9 +590,9 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, offset + 0x08, 1);
/* per-MP state */
- for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ for (j = 0; j < (device->chipset < 0xa0 ? 2 : 4); j++) {
if (!(units & (1 << (j+24)))) continue;
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0x200 + (j<<7);
else
offset = base + 0x100 + (j<<7);
@@ -620,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x04, 0x00160000);
gr_def(ctx, offset + 0x08, 0x01800000);
gr_def(ctx, offset + 0x18, 0x0003ffff);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
gr_def(ctx, offset + 0x1c, 0x00080000);
break;
@@ -651,53 +632,53 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
break;
}
gr_def(ctx, offset + 0x40, 0x00010401);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x48, 0x00000040);
else
gr_def(ctx, offset + 0x48, 0x00000078);
gr_def(ctx, offset + 0x50, 0x000000bf);
gr_def(ctx, offset + 0x58, 0x00001210);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x5c, 0x00000080);
else
gr_def(ctx, offset + 0x5c, 0x08000080);
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
gr_def(ctx, offset + 0x68, 0x0000003e);
}
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x300, 0x4);
else
cp_ctx(ctx, base + 0x300, 0x5);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, base + 0x304, 0x00007070);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, base + 0x304, 0x00027070);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
gr_def(ctx, base + 0x304, 0x01127070);
else
gr_def(ctx, base + 0x304, 0x05127070);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x318, 1);
else
cp_ctx(ctx, base + 0x320, 1);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, base + 0x318, 0x0003ffff);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, base + 0x318, 0x03ffffff);
else
gr_def(ctx, base + 0x320, 0x07ffffff);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
cp_ctx(ctx, base + 0x324, 5);
else
cp_ctx(ctx, base + 0x328, 4);
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
cp_ctx(ctx, base + 0x340, 9);
offset = base + 0x340;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
cp_ctx(ctx, base + 0x33c, 0xb);
offset = base + 0x344;
} else {
@@ -706,12 +687,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
gr_def(ctx, offset + 0x0, 0x00120407);
gr_def(ctx, offset + 0x4, 0x05091507);
- if (dev_priv->chipset == 0x84)
+ if (device->chipset == 0x84)
gr_def(ctx, offset + 0x8, 0x05100202);
else
gr_def(ctx, offset + 0x8, 0x05010202);
gr_def(ctx, offset + 0xc, 0x00030201);
- if (dev_priv->chipset == 0xa3)
+ if (device->chipset == 0xa3)
cp_ctx(ctx, base + 0x36c, 1);
cp_ctx(ctx, base + 0x400, 2);
@@ -720,7 +701,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
gr_def(ctx, base + 0x410, 0x00141210);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0x800;
else
offset = base + 0x500;
@@ -728,55 +709,55 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x0, 0x000001f0);
gr_def(ctx, offset + 0x4, 0x00000001);
gr_def(ctx, offset + 0x8, 0x00000003);
- if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
+ if (device->chipset == 0x50 || IS_NVAAF(device->chipset))
gr_def(ctx, offset + 0xc, 0x00008000);
gr_def(ctx, offset + 0x14, 0x00039e00);
cp_ctx(ctx, offset + 0x1c, 2);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x1c, 0x00000040);
else
gr_def(ctx, offset + 0x1c, 0x00000100);
gr_def(ctx, offset + 0x20, 0x00003800);
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
cp_ctx(ctx, base + 0x54c, 2);
- if (!IS_NVA3F(dev_priv->chipset))
+ if (!IS_NVA3F(device->chipset))
gr_def(ctx, base + 0x54c, 0x003fe006);
else
gr_def(ctx, base + 0x54c, 0x003fe007);
gr_def(ctx, base + 0x550, 0x003fe000);
}
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xa00;
else
offset = base + 0x680;
cp_ctx(ctx, offset, 1);
gr_def(ctx, offset, 0x00404040);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
offset = base + 0xe00;
else
offset = base + 0x700;
cp_ctx(ctx, offset, 2);
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
gr_def(ctx, offset, 0x0077f005);
- else if (dev_priv->chipset == 0xa5)
+ else if (device->chipset == 0xa5)
gr_def(ctx, offset, 0x6cf7f007);
- else if (dev_priv->chipset == 0xa8)
+ else if (device->chipset == 0xa8)
gr_def(ctx, offset, 0x6cfff007);
- else if (dev_priv->chipset == 0xac)
+ else if (device->chipset == 0xac)
gr_def(ctx, offset, 0x0cfff007);
else
gr_def(ctx, offset, 0x0cf7f007);
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
gr_def(ctx, offset + 0x4, 0x00007fff);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
gr_def(ctx, offset + 0x4, 0x003f7fff);
else
gr_def(ctx, offset + 0x4, 0x02bf7fff);
cp_ctx(ctx, offset + 0x2c, 1);
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
cp_ctx(ctx, offset + 0x50, 9);
gr_def(ctx, offset + 0x54, 0x000003ff);
gr_def(ctx, offset + 0x58, 0x00000003);
@@ -785,7 +766,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, offset + 0x64, 0x0000001f);
gr_def(ctx, offset + 0x68, 0x0000000f);
gr_def(ctx, offset + 0x6c, 0x0000000f);
- } else if (dev_priv->chipset < 0xa0) {
+ } else if (device->chipset < 0xa0) {
cp_ctx(ctx, offset + 0x50, 1);
cp_ctx(ctx, offset + 0x70, 1);
} else {
@@ -797,7 +778,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
}
static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+dd_emit(struct nouveau_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
@@ -808,7 +789,7 @@ dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
static void
nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int base, num;
base = ctx->ctxvals_pos;
@@ -822,7 +803,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
- if (dev_priv->chipset >= 0x94)
+ if (device->chipset >= 0x94)
dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
@@ -851,7 +832,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
@@ -863,7 +844,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
/* compat 2d state */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
@@ -923,7 +904,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
/* more compat 2d state */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
@@ -957,18 +938,18 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
dd_emit(ctx, 1, 0); /* ffffffff */
dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
} else {
dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
}
dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
dd_emit(ctx, 1, 0); /* 00000001 */
} else {
@@ -994,7 +975,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 3, 0); /* 1, 1, 1 */
else
dd_emit(ctx, 2, 0); /* 1, 1 */
@@ -1002,15 +983,15 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
dd_emit(ctx, 1, 3); /* 00000003 */
dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
}
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
@@ -1022,16 +1003,16 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* ffffffff */
dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
dd_emit(ctx, 8, 0); /* 00000001 */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
@@ -1042,20 +1023,20 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0); /* 00000001 */
dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
dd_emit(ctx, 1, 0); /* 00000003 */
dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
dd_emit(ctx, 1, 0xe00); /* 7fff */
dd_emit(ctx, 1, 0x1000); /* 7fff */
dd_emit(ctx, 1, 0x1e00); /* 7fff */
@@ -1070,10 +1051,10 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
dd_emit(ctx, 1, 0x200);
dd_emit(ctx, 1, 0); /* 00000001 */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
dd_emit(ctx, 1, 1); /* 00000001 */
dd_emit(ctx, 1, 0x70); /* 000000ff */
dd_emit(ctx, 1, 0x80); /* 000000ff */
@@ -1120,7 +1101,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
num = ctx->ctxvals_pos - base;
ctx->ctxvals_pos = base;
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
cp_ctx(ctx, 0x404800, num);
else
cp_ctx(ctx, 0x405400, num);
@@ -1169,7 +1150,7 @@ nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
*/
static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+xf_emit(struct nouveau_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
for (i = 0; i < num; i++)
@@ -1201,16 +1182,16 @@ static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
static void
nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
int offset;
int size = 0;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 units = nv_rd32 (ctx->device, 0x1540);
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
ctx->ctxvals_base = offset;
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
/* Strand 0 */
ctx->ctxvals_pos = offset;
nv50_graph_construct_gene_dispatch(ctx);
@@ -1280,7 +1261,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 2 */
ctx->ctxvals_pos = offset + 2;
- if (dev_priv->chipset == 0xa0)
+ if (device->chipset == 0xa0)
nv50_graph_construct_gene_unk14xx(ctx);
nv50_graph_construct_gene_unk24xx(ctx);
if ((ctx->ctxvals_pos-offset)/8 > size)
@@ -1327,7 +1308,7 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
/* Strand 7 */
ctx->ctxvals_pos = offset + 7;
- if (dev_priv->chipset == 0xa0) {
+ if (device->chipset == 0xa0) {
if (units & (1 << 4))
nv50_graph_construct_xfer_tp(ctx);
if (units & (1 << 5))
@@ -1365,24 +1346,24 @@ static void
nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
{
/* start of strand 0 */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* SEEK */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 5, 0);
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 6, 0);
else
xf_emit(ctx, 4, 0);
/* SEEK */
/* the PGRAPH's internal FIFO */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 8*3, 0);
else
xf_emit(ctx, 0x100*3, 0);
/* and another bonus slot?!? */
xf_emit(ctx, 3, 0);
/* and YET ANOTHER bonus slot? */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 3, 0);
/* SEEK */
/* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
@@ -1394,7 +1375,7 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
/* SEEK */
xf_emit(ctx, 9, 0);
/* SEEK */
- if (dev_priv->chipset < 0x90)
+ if (device->chipset < 0x90)
xf_emit(ctx, 4, 0);
/* SEEK */
xf_emit(ctx, 2, 0);
@@ -1407,9 +1388,9 @@ nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
xf_emit(ctx, 6*2, 0);
xf_emit(ctx, 2, 0);
/* SEEK */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 0x1c, 0);
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
xf_emit(ctx, 0x1e, 0);
else
xf_emit(ctx, 0x22, 0);
@@ -1421,9 +1402,9 @@ static void
nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
{
/* Strand 0, right after dispatch */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int smallm2mf = 0;
- if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+ if (device->chipset < 0x92 || device->chipset == 0x98)
smallm2mf = 1;
/* SEEK */
xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
@@ -1472,10 +1453,10 @@ nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* RO */
xf_emit(ctx, 0x800, 0); /* ffffffff */
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x92:
case 0xa0:
@@ -1540,7 +1521,7 @@ nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
@@ -1550,14 +1531,14 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff);
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff */
xf_emit(ctx, 1, 0); /* 111/113 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
for (i = 0; i < 8; i++) {
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x86:
case 0x98:
@@ -1600,7 +1581,7 @@ nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* end of area 2 on pre-NVA0, area 1 on NVAx */
xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
@@ -1614,9 +1595,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x0fac6881);
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 1);
xf_emit(ctx, 3, 0);
}
@@ -1625,9 +1606,9 @@ nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 5, 0); /* ffffffff */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 */
@@ -1643,14 +1624,14 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 3ff */
xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
@@ -1669,7 +1650,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
xf_emit(ctx, 1, 0); /* 0000000f */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -1704,11 +1685,11 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* ffffffff */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
@@ -1736,7 +1717,7 @@ nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
@@ -1774,7 +1755,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
@@ -1789,7 +1770,7 @@ nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
}
@@ -1817,7 +1798,7 @@ nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
/* SEEK */
@@ -1829,7 +1810,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 4, 0); /* RO */
xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
xf_emit(ctx, 1, 0); /* 1ff */
@@ -1860,7 +1841,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
/* SEEK */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
@@ -1869,7 +1850,7 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 1); /* 00000001 */
/* SEEK */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 2, 4); /* 000000ff */
xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
@@ -1893,20 +1874,20 @@ nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 000003ff */
}
static void
nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int acnt = 0x10, rep, i;
/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
acnt = 0x20;
/* SEEK */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
}
@@ -1923,9 +1904,9 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0x9, 0); /* RO */
else
xf_emit(ctx, 0x8, 0); /* RO */
@@ -1944,11 +1925,11 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
- if (dev_priv->chipset == 0xa8)
+ if (device->chipset == 0xa8)
xf_emit(ctx, 1, 0x1e00); /* 7fff */
/* SEEK */
xf_emit(ctx, 0xc, 0); /* RO or close */
@@ -1956,13 +1937,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+ if (device->chipset > 0x50 && device->chipset < 0xa0)
xf_emit(ctx, 2, 0); /* ffffffff */
else
xf_emit(ctx, 1, 0); /* ffffffff */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 0x10, 0); /* 0? */
xf_emit(ctx, 2, 0); /* weird... */
xf_emit(ctx, 2, 0); /* RO */
@@ -1975,7 +1956,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
/* SEEK */
xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
@@ -2013,23 +1994,23 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
xf_emit(ctx, 3, 0); /* f/1f */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, acnt, 0); /* f */
xf_emit(ctx, 3, 0); /* f/1f */
}
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 2, 0); /* RO */
else
xf_emit(ctx, 5, 0); /* RO */
/* SEEK */
xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
/* SEEK */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
xf_emit(ctx, 0x41, 0); /* RO */
/* SEEK */
xf_emit(ctx, 0x11, 0); /* RO */
- } else if (!IS_NVA3F(dev_priv->chipset))
+ } else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 0x50, 0); /* RO */
else
xf_emit(ctx, 0x58, 0); /* RO */
@@ -2041,7 +2022,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x1d, 0); /* RO */
else
xf_emit(ctx, 0x16, 0); /* RO */
@@ -2049,21 +2030,21 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
/* SEEK */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 8, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
+ else if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0xc, 0); /* RO */
else
xf_emit(ctx, 7, 0); /* RO */
/* SEEK */
xf_emit(ctx, 0xa, 0); /* RO */
- if (dev_priv->chipset == 0xa0)
+ if (device->chipset == 0xa0)
rep = 0xc;
else
rep = 4;
for (i = 0; i < rep; i++) {
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x20, 0); /* ffffffff */
xf_emit(ctx, 0x200, 0); /* ffffffff */
xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
@@ -2077,7 +2058,7 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
/* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 7, 0); /* weird... */
else
xf_emit(ctx, 5, 0); /* weird... */
@@ -2086,13 +2067,13 @@ nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
/* this is useless on everything but the original NV50,
* guess they forgot to nuke it. Or just didn't bother. */
xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
@@ -2148,7 +2129,7 @@ nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
/* SEEK */
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
@@ -2173,7 +2154,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
/* SEEK */
xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
case 0x92:
xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
@@ -2247,7 +2228,7 @@ nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2277,9 +2258,9 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
@@ -2293,11 +2274,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* 3ff */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
}
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
@@ -2316,11 +2297,11 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 0x1c, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
+ else if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x9, 0);
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
@@ -2328,13 +2309,13 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
xf_emit(ctx, 1, 0); /* 3ff */
}
/* XXX: the following block could belong either to unk1cxx, or
* to STRMOUT. Rather hard to tell. */
- if (dev_priv->chipset < 0xa0)
+ if (device->chipset < 0xa0)
xf_emit(ctx, 0x25, 0);
else
xf_emit(ctx, 0x3b, 0);
@@ -2343,18 +2324,18 @@ nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
}
xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
else
xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
@@ -2365,7 +2346,7 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
}
@@ -2385,12 +2366,12 @@ nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* 000003ff */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
@@ -2398,7 +2379,7 @@ nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
/* SEEK */
xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
@@ -2416,7 +2397,7 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
}
@@ -2424,11 +2405,11 @@ nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic2;
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
magic2 = 0x001ffe67;
} else {
magic2 = 0x00087e67;
@@ -2446,14 +2427,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+ if (device->chipset >= 0xa0 && !IS_NVAAF(device->chipset))
xf_emit(ctx, 1, 0x15); /* 000000ff */
xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
@@ -2462,14 +2443,14 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ if (device->chipset == 0x86 || device->chipset == 0x92 || device->chipset == 0x98 || device->chipset >= 0xa0) {
xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
xf_emit(ctx, 1, 4); /* 7 */
xf_emit(ctx, 1, 0x400); /* fffffff */
xf_emit(ctx, 1, 0x300); /* ffff */
xf_emit(ctx, 1, 0x1001); /* 1fff */
- if (dev_priv->chipset != 0xa0) {
- if (IS_NVA3F(dev_priv->chipset))
+ if (device->chipset != 0xa0) {
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
else
xf_emit(ctx, 1, 0x15); /* ff */
@@ -2547,7 +2528,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 2, 0);
xf_emit(ctx, 1, 0x1001);
xf_emit(ctx, 0xb, 0);
@@ -2564,7 +2545,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
xf_emit(ctx, 1, 0x11); /* 3f/7f */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (dev_priv->chipset != 0x50) {
+ if (device->chipset != 0x50) {
xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
xf_emit(ctx, 1, 0); /* 000000ff */
}
@@ -2581,7 +2562,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2600,7 +2581,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
- } else if (dev_priv->chipset >= 0xa0) {
+ } else if (device->chipset >= 0xa0) {
xf_emit(ctx, 2, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* 00000003 */
@@ -2614,7 +2595,7 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 2, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* 000003ff */
xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
@@ -2628,9 +2609,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
@@ -2659,9 +2640,9 @@ nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic3;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x50:
magic3 = 0x1000;
break;
@@ -2681,16 +2662,16 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x1f, 0); /* ffffffff */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0x0f, 0); /* ffffffff */
else
xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x03020100); /* ffffffff */
else
xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
@@ -2733,11 +2714,11 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
xf_emit(ctx, 1, 0); /* 111/113 */
- if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ if (device->chipset == 0x94 || device->chipset == 0x96)
xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (dev_priv->chipset < 0xa0)
+ else if (device->chipset < 0xa0)
xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (!IS_NVA3F(dev_priv->chipset))
+ else if (!IS_NVA3F(device->chipset))
xf_emit(ctx, 0x210, 0); /* ffffffff */
else
xf_emit(ctx, 0x410, 0); /* ffffffff */
@@ -2751,12 +2732,12 @@ nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int magic1, magic2;
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
magic1 = 0x3ff;
magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
+ } else if (!IS_NVA3F(device->chipset)) {
magic1 = 0x7ff;
magic2 = 0x001ffe67;
} else {
@@ -2766,7 +2747,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
@@ -2800,11 +2781,11 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
- } else if (dev_priv->chipset >= 0xa0) {
+ } else if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000003 */
} else {
@@ -2818,7 +2799,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
@@ -2846,7 +2827,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
@@ -2870,9 +2851,9 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0); /* ff */
else
xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
@@ -2907,7 +2888,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
@@ -2945,7 +2926,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
@@ -2974,7 +2955,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 0); /* 00000001 */
xf_emit(ctx, 1, 0); /* ffff0ff3 */
xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
@@ -2988,14 +2969,14 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
xf_emit(ctx, 1, 0); /* 7 */
xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
}
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* ffff0ff3 */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
@@ -3012,12 +2993,12 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
}
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
xf_emit(ctx, 1, 0xfac6881); /* fffffff */
xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
@@ -3027,7 +3008,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 2, 0); /* 7, f */
xf_emit(ctx, 1, 1); /* 1 */
xf_emit(ctx, 1, 0); /* 7/f */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 0x9, 0); /* 1 */
else
xf_emit(ctx, 0x8, 0); /* 1 */
@@ -3041,7 +3022,7 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x11); /* 7f */
xf_emit(ctx, 1, 1); /* 1 */
xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
}
@@ -3051,15 +3032,15 @@ nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
- if (dev_priv->chipset != 0x50)
+ if (device->chipset != 0x50)
xf_emit(ctx, 1, 0); /* 3 */
xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
else
xf_emit(ctx, 2, 0); /* 3ff, 1 */
@@ -3071,13 +3052,13 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
- if (dev_priv->chipset == 0x50) {
+ if (device->chipset == 0x50) {
xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
- } else if (!IS_NVAAF(dev_priv->chipset)) {
+ } else if (!IS_NVAAF(device->chipset)) {
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
xf_emit(ctx, 1, 0); /* 00000003 */
xf_emit(ctx, 1, 0); /* 000003ff */
@@ -3097,7 +3078,7 @@ nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
@@ -3109,7 +3090,7 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
@@ -3136,8 +3117,8 @@ nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- if (dev_priv->chipset < 0xa0) {
+ struct nouveau_device *device = ctx->device;
+ if (device->chipset < 0xa0) {
nv50_graph_construct_xfer_unk84xx(ctx);
nv50_graph_construct_xfer_tprop(ctx);
nv50_graph_construct_xfer_tex(ctx);
@@ -3153,9 +3134,9 @@ nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i, mpcnt = 2;
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0x98:
case 0xaa:
mpcnt = 1;
@@ -3182,34 +3163,34 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
xf_emit(ctx, 1, 0x04000400); /* ffffffff */
- if (dev_priv->chipset >= 0xa0)
+ if (device->chipset >= 0xa0)
xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+ if (device->chipset == 0x86 || device->chipset == 0x98 || device->chipset == 0xa8 || IS_NVAAF(device->chipset)) {
xf_emit(ctx, 1, 0xe00); /* 7fff */
xf_emit(ctx, 1, 0x1e00); /* 7fff */
}
xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
- if (IS_NVAAF(dev_priv->chipset))
+ if (IS_NVAAF(device->chipset))
xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
+ else if (device->chipset >= 0xa0)
xf_emit(ctx, 0xc, 0); /* RO */
else
xf_emit(ctx, 0xa, 0); /* RO */
}
xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
xf_emit(ctx, 1, 0); /* ff/3ff */
- if (dev_priv->chipset >= 0xa0) {
+ if (device->chipset >= 0xa0) {
xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
}
xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
@@ -3223,7 +3204,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
xf_emit(ctx, 1, 0x1fe21);	/* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
xf_emit(ctx, 1, 0); /* ff/3ff */
xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
@@ -3238,7 +3219,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000007 */
xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
- if (IS_NVA3F(dev_priv->chipset))
+ if (IS_NVA3F(device->chipset))
xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
@@ -3253,7 +3234,7 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- if (IS_NVA3F(dev_priv->chipset)) {
+ if (IS_NVA3F(device->chipset)) {
xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
@@ -3268,11 +3249,11 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
/* XXX: demagic this part some day */
- if (dev_priv->chipset == 0x50)
+ if (device->chipset == 0x50)
xf_emit(ctx, 0x3a0, 0);
- else if (dev_priv->chipset < 0x94)
+ else if (device->chipset < 0x94)
xf_emit(ctx, 0x3a2, 0);
- else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ else if (device->chipset == 0x98 || device->chipset == 0xaa)
xf_emit(ctx, 0x39f, 0);
else
xf_emit(ctx, 0x3a3, 0);
@@ -3285,15 +3266,15 @@ nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
static void
nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ struct nouveau_device *device = ctx->device;
int i;
- uint32_t offset;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ u32 offset;
+ u32 units = nv_rd32 (ctx->device, 0x1540);
int size = 0;
offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
- if (dev_priv->chipset < 0xa0) {
+ if (device->chipset < 0xa0) {
for (i = 0; i < 8; i++) {
ctx->ctxvals_pos = offset + i;
/* that little bugger belongs to csched. No idea
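[Editor's note: the xfer2 bookkeeping above carves per-unit slices out of the context image after rounding ctx->ctxvals_pos up to a 0x40-word boundary. The following is a minimal, stand-alone sketch of that align-up idiom; it is illustrative only and not part of the patch.]

#include <stdint.h>
#include <stdio.h>

/* Round "pos" up to the next multiple of "align" (align must be a power of
 * two).  This mirrors the "(ctx->ctxvals_pos + 0x3f) & ~0x3f" pattern used
 * when laying out per-unit context value areas on 0x40-word boundaries.
 */
static uint32_t align_up(uint32_t pos, uint32_t align)
{
        return (pos + (align - 1)) & ~(align - 1);
}

int main(void)
{
        printf("%#x\n", (unsigned)align_up(0x1234, 0x40)); /* prints 0x1240 */
        return 0;
}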
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
index b19a406e55d9..c12e7668dbfe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnvc0.c
@@ -22,13 +22,10 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include <core/mm.h>
#include "nvc0.h"
void
-nv_icmd(struct drm_device *priv, u32 icmd, u32 data)
+nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data)
{
nv_wr32(priv, 0x400204, data);
nv_wr32(priv, 0x400200, icmd);
@@ -36,21 +33,22 @@ nv_icmd(struct drm_device *priv, u32 icmd, u32 data)
}
int
-nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
- struct nvc0_grctx *info)
+nvc0_grctx_init(struct nvc0_graph_priv *priv, struct nvc0_grctx *info)
{
+ struct nouveau_bar *bar = nouveau_bar(priv);
+ struct nouveau_object *parent = nv_object(priv);
struct nouveau_gpuobj *chan;
- u32 size = (0x80000 + oprv->size + 4095) & ~4095;
+ u32 size = (0x80000 + priv->size + 4095) & ~4095;
int ret, i;
/* allocate memory for a "channel", which we'll use to generate
* the default context values
*/
- ret = nouveau_gpuobj_new(priv, NULL, size, 0x1000,
+ ret = nouveau_gpuobj_new(parent, NULL, size, 0x1000,
NVOBJ_FLAG_ZERO_ALLOC, &info->chan);
chan = info->chan;
if (ret) {
- NV_ERROR(priv, "failed to allocate channel memory, %d\n", ret);
+ nv_error(priv, "failed to allocate channel memory, %d\n", ret);
return ret;
}
@@ -75,32 +73,31 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
nv_wo32(chan, 0x0210, 0x00080004);
nv_wo32(chan, 0x0214, 0x00000000);
- nvimem_flush(priv);
+ bar->flush(bar);
nv_wr32(priv, 0x100cb8, (chan->addr + 0x1000) >> 8);
nv_wr32(priv, 0x100cbc, 0x80000001);
nv_wait(priv, 0x100c80, 0x00008000, 0x00008000);
/* setup default state for mmio list construction */
- info->dev = priv;
- info->data = oprv->mmio_data;
- info->mmio = oprv->mmio_list;
+ info->data = priv->mmio_data;
+ info->mmio = priv->mmio_list;
info->addr = 0x2000 + (i * 8);
- info->priv = oprv;
+ info->priv = priv;
info->buffer_nr = 0;
- if (oprv->firmware) {
+ if (priv->firmware) {
nv_wr32(priv, 0x409840, 0x00000030);
nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000003);
if (!nv_wait(priv, 0x409800, 0x00000010, 0x00000010))
- NV_ERROR(priv, "load_ctx timeout\n");
+ nv_error(priv, "load_ctx timeout\n");
nv_wo32(chan, 0x8001c, 1);
nv_wo32(chan, 0x80020, 0);
nv_wo32(chan, 0x80028, 0);
nv_wo32(chan, 0x8002c, 0);
- nvimem_flush(priv);
+ bar->flush(bar);
return 0;
}
@@ -109,7 +106,7 @@ nvc0_grctx_init(struct drm_device *priv, struct nvc0_graph_priv *oprv,
nv_wr32(priv, 0x409500, 0x80000000 | chan->addr >> 12);
nv_wr32(priv, 0x409504, 0x00000001);
if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(priv, "HUB_SET_CHAN timeout\n");
+ nv_error(priv, "HUB_SET_CHAN timeout\n");
nvc0_graph_ctxctl_debug(priv);
nouveau_gpuobj_ref(NULL, &info->chan);
return -EBUSY;
@@ -135,6 +132,8 @@ nvc0_grctx_data(struct nvc0_grctx *info, u32 size, u32 align, u32 access)
void
nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
{
+ struct nvc0_graph_priv *priv = info->priv;
+
info->mmio->addr = addr;
info->mmio->data = data;
info->mmio->shift = shift;
@@ -143,7 +142,7 @@ nvc0_grctx_mmio(struct nvc0_grctx *info, u32 addr, u32 data, u32 shift, u32 buf)
if (shift)
data |= info->buffer[buf] >> shift;
- nv_wr32(info->dev, addr, data);
+ nv_wr32(priv, addr, data);
}
int
@@ -153,11 +152,11 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
int i;
if (priv->firmware) {
- nv_wr32(info->dev, 0x409840, 0x00000003);
- nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12);
- nv_wr32(info->dev, 0x409504, 0x00000009);
- if (!nv_wait(info->dev, 0x409800, 0x00000001, 0x00000000)) {
- NV_ERROR(info->dev, "unload_ctx timeout\n");
+ nv_wr32(priv, 0x409840, 0x00000003);
+ nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
+ nv_wr32(priv, 0x409504, 0x00000009);
+ if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000000)) {
+ nv_error(priv, "unload_ctx timeout\n");
return -EBUSY;
}
@@ -165,12 +164,12 @@ nvc0_grctx_fini(struct nvc0_grctx *info)
}
/* HUB_FUC(CTX_SAVE) */
- nv_wr32(info->dev, 0x409840, 0x80000000);
- nv_wr32(info->dev, 0x409500, 0x80000000 | info->chan->addr >> 12);
- nv_wr32(info->dev, 0x409504, 0x00000002);
- if (!nv_wait(info->dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(info->dev, "HUB_CTX_SAVE timeout\n");
- nvc0_graph_ctxctl_debug(info->dev);
+ nv_wr32(priv, 0x409840, 0x80000000);
+ nv_wr32(priv, 0x409500, 0x80000000 | info->chan->addr >> 12);
+ nv_wr32(priv, 0x409504, 0x00000002);
+ if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+ nv_error(priv, "HUB_CTX_SAVE timeout\n");
+ nvc0_graph_ctxctl_debug(priv);
return -EBUSY;
}
@@ -186,7 +185,7 @@ save:
}
static void
-nvc0_grctx_generate_9097(struct drm_device *priv)
+nvc0_grctx_generate_9097(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@@ -1343,7 +1342,7 @@ nvc0_grctx_generate_9097(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_9197(struct drm_device *priv)
+nvc0_grctx_generate_9197(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@@ -1356,7 +1355,7 @@ nvc0_grctx_generate_9197(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_9297(struct drm_device *priv)
+nvc0_grctx_generate_9297(struct nvc0_graph_priv *priv)
{
u32 fermi = nvc0_graph_class(priv);
u32 mthd;
@@ -1374,7 +1373,7 @@ nvc0_grctx_generate_9297(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_902d(struct drm_device *priv)
+nvc0_grctx_generate_902d(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@@ -1396,7 +1395,7 @@ nvc0_grctx_generate_902d(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_9039(struct drm_device *priv)
+nvc0_grctx_generate_9039(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x9039, 0x030c, 0x00000000);
nv_mthd(priv, 0x9039, 0x0310, 0x00000000);
@@ -1409,12 +1408,11 @@ nvc0_grctx_generate_9039(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_90c0(struct drm_device *priv)
+nvc0_grctx_generate_90c0(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
int i;
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
nv_mthd(priv, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
@@ -1430,7 +1428,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
nv_mthd(priv, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(priv, 0x90c0, 0x27ec, 0x00000000);
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ for (i = 0; nv_device(priv)->chipset == 0xd9 && i < 4; i++) {
nv_mthd(priv, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
nv_mthd(priv, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
@@ -1458,7 +1456,7 @@ nvc0_grctx_generate_90c0(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_dispatch(struct drm_device *priv)
+nvc0_grctx_generate_dispatch(struct nvc0_graph_priv *priv)
{
int i;
@@ -1511,7 +1509,7 @@ nvc0_grctx_generate_dispatch(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_macro(struct drm_device *priv)
+nvc0_grctx_generate_macro(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404404, 0x00000000);
nv_wr32(priv, 0x404408, 0x00000000);
@@ -1536,7 +1534,7 @@ nvc0_grctx_generate_macro(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_m2mf(struct drm_device *priv)
+nvc0_grctx_generate_m2mf(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404604, 0x00000015);
nv_wr32(priv, 0x404608, 0x00000000);
@@ -1600,7 +1598,7 @@ nvc0_grctx_generate_m2mf(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_unk47xx(struct drm_device *priv)
+nvc0_grctx_generate_unk47xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404700, 0x00000000);
nv_wr32(priv, 0x404704, 0x00000000);
@@ -1627,16 +1625,15 @@ nvc0_grctx_generate_unk47xx(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_shaders(struct drm_device *priv)
+nvc0_grctx_generate_shaders(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
- if (dev_priv->chipset == 0xd9) {
+ if (nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x405800, 0x0f8000bf);
nv_wr32(priv, 0x405830, 0x02180218);
nv_wr32(priv, 0x405834, 0x08000000);
} else
- if (dev_priv->chipset == 0xc1) {
+ if (nv_device(priv)->chipset == 0xc1) {
nv_wr32(priv, 0x405800, 0x0f8000bf);
nv_wr32(priv, 0x405830, 0x02180218);
nv_wr32(priv, 0x405834, 0x00000000);
@@ -1657,7 +1654,7 @@ nvc0_grctx_generate_shaders(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_unk60xx(struct drm_device *priv)
+nvc0_grctx_generate_unk60xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x406020, 0x000103c1);
nv_wr32(priv, 0x406028, 0x00000001);
@@ -1667,25 +1664,24 @@ nvc0_grctx_generate_unk60xx(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_unk64xx(struct drm_device *priv)
+nvc0_grctx_generate_unk64xx(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
nv_wr32(priv, 0x4064a8, 0x00000000);
nv_wr32(priv, 0x4064ac, 0x00003fff);
nv_wr32(priv, 0x4064b4, 0x00000000);
nv_wr32(priv, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xd9)
+ if (nv_device(priv)->chipset == 0xd9)
nv_wr32(priv, 0x4064bc, 0x00000000);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9) {
+ if (nv_device(priv)->chipset == 0xc1 ||
+ nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x4064c0, 0x80140078);
nv_wr32(priv, 0x4064c4, 0x0086ffff);
}
}
static void
-nvc0_grctx_generate_tpbus(struct drm_device *priv)
+nvc0_grctx_generate_tpbus(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407804, 0x00000023);
nv_wr32(priv, 0x40780c, 0x0a418820);
@@ -1698,7 +1694,7 @@ nvc0_grctx_generate_tpbus(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_ccache(struct drm_device *priv)
+nvc0_grctx_generate_ccache(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408000, 0x00000000);
nv_wr32(priv, 0x408004, 0x00000000);
@@ -1711,10 +1707,9 @@ nvc0_grctx_generate_ccache(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_rop(struct drm_device *priv)
+nvc0_grctx_generate_rop(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
- int chipset = dev_priv->chipset;
+ int chipset = nv_device(priv)->chipset;
/* ROPC_BROADCAST */
nv_wr32(priv, 0x408800, 0x02802a3c);
@@ -1741,10 +1736,9 @@ nvc0_grctx_generate_rop(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_gpc(struct drm_device *priv)
+nvc0_grctx_generate_gpc(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
- int chipset = dev_priv->chipset;
+ int chipset = nv_device(priv)->chipset;
int i;
/* GPC_BROADCAST */
@@ -1834,10 +1828,9 @@ nvc0_grctx_generate_gpc(struct drm_device *priv)
}
static void
-nvc0_grctx_generate_tp(struct drm_device *priv)
+nvc0_grctx_generate_tp(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
- int chipset = dev_priv->chipset;
+ int chipset = nv_device(priv)->chipset;
/* GPC_BROADCAST.TP_BROADCAST */
nv_wr32(priv, 0x419818, 0x00000000);
@@ -1876,7 +1869,7 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
nv_wr32(priv, 0x419c04, 0x00000006);
nv_wr32(priv, 0x419c08, 0x00000002);
nv_wr32(priv, 0x419c20, 0x00000000);
- if (dev_priv->chipset == 0xd9) {
+ if (nv_device(priv)->chipset == 0xd9) {
nv_wr32(priv, 0x419c24, 0x00084210);
nv_wr32(priv, 0x419c28, 0x3cf3cf3c);
nv_wr32(priv, 0x419cb0, 0x00020048);
@@ -1929,16 +1922,14 @@ nvc0_grctx_generate_tp(struct drm_device *priv)
}
int
-nvc0_grctx_generate(struct drm_device *priv)
+nvc0_grctx_generate(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
- struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
struct nvc0_grctx info;
int ret, i, gpc, tpc, id;
u32 fermi = nvc0_graph_class(priv);
u32 r000260, tmp;
- ret = nvc0_grctx_init(priv, oprv, &info);
+ ret = nvc0_grctx_init(priv, &info);
if (ret)
return ret;
@@ -1975,11 +1966,11 @@ nvc0_grctx_generate(struct drm_device *priv)
mmio_list(0x419008, 0x00000000, 0, 0);
mmio_list(0x418808, 0x00000000, 8, 0);
mmio_list(0x41880c, 0x80000018, 0, 0);
- if (dev_priv->chipset != 0xc1) {
+ if (nv_device(priv)->chipset != 0xc1) {
tmp = 0x02180000;
mmio_list(0x405830, tmp, 0, 0);
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
mmio_list(reg, tmp, 0, 0);
tmp += 0x0324;
@@ -1989,13 +1980,13 @@ nvc0_grctx_generate(struct drm_device *priv)
tmp = 0x02180000;
mmio_list(0x405830, 0x00000218 | tmp, 0, 0);
mmio_list(0x4064c4, 0x0086ffff, 0, 0);
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
- for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0520);
mmio_list(reg, 0x10000000 | tmp, 0, 0);
tmp += 0x0324;
}
- for (tpc = 0; tpc < oprv->tpc_nr[gpc]; tpc++) {
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
u32 reg = TPC_UNIT(gpc, tpc, 0x0544);
mmio_list(reg, tmp, 0, 0);
tmp += 0x0324;
@@ -2004,8 +1995,8 @@ nvc0_grctx_generate(struct drm_device *priv)
}
for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
- if (tpc < oprv->tpc_nr[gpc]) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x698), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x4e8), id);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
@@ -2013,14 +2004,14 @@ nvc0_grctx_generate(struct drm_device *priv)
id++;
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
}
}
tmp = 0;
- for (i = 0; i < oprv->gpc_nr; i++)
- tmp |= oprv->tpc_nr[i] << (i * 4);
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
nv_wr32(priv, 0x406028, tmp);
nv_wr32(priv, 0x405870, tmp);
@@ -2034,13 +2025,13 @@ nvc0_grctx_generate(struct drm_device *priv)
if (1) {
u8 tpcnr[GPC_MAX], data[TPC_MAX];
- memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
memset(data, 0x1f, sizeof(data));
gpc = -1;
- for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % oprv->gpc_nr;
+ gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
data[tpc] = gpc;
@@ -2056,12 +2047,12 @@ nvc0_grctx_generate(struct drm_device *priv)
u8 shift, ntpcv;
/* calculate first set of magics */
- memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
gpc = -1;
- for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % oprv->gpc_nr;
+ gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@@ -2073,7 +2064,7 @@ nvc0_grctx_generate(struct drm_device *priv)
/* and the second... */
shift = 0;
- ntpcv = oprv->tpc_total;
+ ntpcv = priv->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@@ -2086,22 +2077,22 @@ nvc0_grctx_generate(struct drm_device *priv)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* GPC_BROADCAST */
- nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) |
- oprv->magic_not_rop_nr);
+ nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
/* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(priv, 0x419bd0, (oprv->tpc_total << 8) |
- oprv->magic_not_rop_nr |
+ nv_wr32(priv, 0x419bd0, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr |
data2[0]);
nv_wr32(priv, 0x419be4, data2[1]);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x419b00 + (i * 4), data[i]);
/* UNK78xx */
- nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) |
- oprv->magic_not_rop_nr);
+ nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) |
+ priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x40780c + (i * 4), data[i]);
}
@@ -2110,18 +2101,18 @@ nvc0_grctx_generate(struct drm_device *priv)
u32 tpc_mask = 0, tpc_set = 0;
u8 tpcnr[GPC_MAX], a, b;
- memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++)
- tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8);
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (oprv->tpc_total - 1)) / 32;
+ a = (i * (priv->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
- gpc = (gpc + 1) % oprv->gpc_nr;
+ gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1 << ((gpc * 8) + tpc);
}
@@ -2232,7 +2223,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x00000215, 0x00000040);
nv_icmd(priv, 0x00000216, 0x00000040);
nv_icmd(priv, 0x00000217, 0x00000040);
- if (dev_priv->chipset == 0xd9) {
+ if (nv_device(priv)->chipset == 0xd9) {
for (i = 0x0400; i <= 0x0417; i++)
nv_icmd(priv, i, 0x00000040);
}
@@ -2244,7 +2235,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x0000021d, 0x0000c080);
nv_icmd(priv, 0x0000021e, 0x0000c080);
nv_icmd(priv, 0x0000021f, 0x0000c080);
- if (dev_priv->chipset == 0xd9) {
+ if (nv_device(priv)->chipset == 0xd9) {
for (i = 0x0440; i <= 0x0457; i++)
nv_icmd(priv, i, 0x0000c080);
}
@@ -2810,8 +2801,8 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x0000053f, 0xffff0000);
nv_icmd(priv, 0x00000585, 0x0000003f);
nv_icmd(priv, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9)
+ if (nv_device(priv)->chipset == 0xc1 ||
+ nv_device(priv)->chipset == 0xd9)
nv_icmd(priv, 0x0000057b, 0x00000059);
nv_icmd(priv, 0x00000586, 0x00000040);
nv_icmd(priv, 0x00000582, 0x00000080);
@@ -2913,7 +2904,7 @@ nvc0_grctx_generate(struct drm_device *priv)
nv_icmd(priv, 0x00000957, 0x00000003);
nv_icmd(priv, 0x0000095e, 0x20164010);
nv_icmd(priv, 0x0000095f, 0x00000020);
- if (dev_priv->chipset == 0xd9)
+ if (nv_device(priv)->chipset == 0xd9)
nv_icmd(priv, 0x0000097d, 0x00000020);
nv_icmd(priv, 0x00000683, 0x00000006);
nv_icmd(priv, 0x00000685, 0x003fffff);
@@ -3056,5 +3047,6 @@ nvc0_grctx_generate(struct drm_device *priv)
nvc0_grctx_generate_90c0(priv);
nv_wr32(priv, 0x000260, r000260);
+
return nvc0_grctx_fini(&info);
}
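[Editor's note: in nvc0_grctx_generate() above, the per-GPC TPC counts are packed one nibble per GPC before being written to 0x406028/0x405870. A hedged, self-contained illustration of that packing follows; the counts used are invented for the example and not taken from real hardware.]

#include <stdint.h>
#include <stdio.h>

#define GPC_MAX 4 /* illustrative only; the driver sizes this per chipset */

/* Pack one 4-bit TPC count per GPC, GPC0 in the low nibble, as done before
 * writing the result to registers 0x406028 and 0x405870.
 */
static uint32_t pack_tpc_nr(const uint8_t *tpc_nr, int gpc_nr)
{
        uint32_t tmp = 0;
        int i;

        for (i = 0; i < gpc_nr; i++)
                tmp |= (uint32_t)tpc_nr[i] << (i * 4);
        return tmp;
}

int main(void)
{
        uint8_t tpc_nr[GPC_MAX] = { 3, 4, 4, 2 }; /* made-up per-GPC counts */

        /* prints 0x00002443 */
        printf("0x%08x\n", (unsigned)pack_tpc_nr(tpc_nr, GPC_MAX));
        return 0;
}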
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
index e5503170d68c..6d8c63931ee6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnve0.c
@@ -22,13 +22,10 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include <core/mm.h>
#include "nvc0.h"
static void
-nve0_grctx_generate_icmd(struct drm_device *priv)
+nve0_grctx_generate_icmd(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x400208, 0x80000000);
nv_icmd(priv, 0x001000, 0x00000004);
@@ -916,7 +913,7 @@ nve0_grctx_generate_icmd(struct drm_device *priv)
}
static void
-nve0_grctx_generate_a097(struct drm_device *priv)
+nve0_grctx_generate_a097(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0xa097, 0x0800, 0x00000000);
nv_mthd(priv, 0xa097, 0x0840, 0x00000000);
@@ -2146,7 +2143,7 @@ nve0_grctx_generate_a097(struct drm_device *priv)
}
static void
-nve0_grctx_generate_902d(struct drm_device *priv)
+nve0_grctx_generate_902d(struct nvc0_graph_priv *priv)
{
nv_mthd(priv, 0x902d, 0x0200, 0x000000cf);
nv_mthd(priv, 0x902d, 0x0204, 0x00000001);
@@ -2169,7 +2166,7 @@ nve0_grctx_generate_902d(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk40xx(struct drm_device *priv)
+nve0_graph_generate_unk40xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404010, 0x0);
nv_wr32(priv, 0x404014, 0x0);
@@ -2213,7 +2210,7 @@ nve0_graph_generate_unk40xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk44xx(struct drm_device *priv)
+nve0_graph_generate_unk44xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404404, 0x0);
nv_wr32(priv, 0x404408, 0x0);
@@ -2238,7 +2235,7 @@ nve0_graph_generate_unk44xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk46xx(struct drm_device *priv)
+nve0_graph_generate_unk46xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404604, 0x14);
nv_wr32(priv, 0x404608, 0x0);
@@ -2278,7 +2275,7 @@ nve0_graph_generate_unk46xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk47xx(struct drm_device *priv)
+nve0_graph_generate_unk47xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x404700, 0x0);
nv_wr32(priv, 0x404704, 0x0);
@@ -2299,7 +2296,7 @@ nve0_graph_generate_unk47xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk58xx(struct drm_device *priv)
+nve0_graph_generate_unk58xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x405800, 0xf8000bf);
nv_wr32(priv, 0x405830, 0x2180648);
@@ -2318,7 +2315,7 @@ nve0_graph_generate_unk58xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk60xx(struct drm_device *priv)
+nve0_graph_generate_unk60xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x406020, 0x4103c1);
nv_wr32(priv, 0x406028, 0x1);
@@ -2328,7 +2325,7 @@ nve0_graph_generate_unk60xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk64xx(struct drm_device *priv)
+nve0_graph_generate_unk64xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x4064a8, 0x0);
nv_wr32(priv, 0x4064ac, 0x3fff);
@@ -2350,13 +2347,13 @@ nve0_graph_generate_unk64xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk70xx(struct drm_device *priv)
+nve0_graph_generate_unk70xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407040, 0x0);
}
static void
-nve0_graph_generate_unk78xx(struct drm_device *priv)
+nve0_graph_generate_unk78xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x407804, 0x23);
nv_wr32(priv, 0x40780c, 0xa418820);
@@ -2369,7 +2366,7 @@ nve0_graph_generate_unk78xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk80xx(struct drm_device *priv)
+nve0_graph_generate_unk80xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408000, 0x0);
nv_wr32(priv, 0x408004, 0x0);
@@ -2382,7 +2379,7 @@ nve0_graph_generate_unk80xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_unk88xx(struct drm_device *priv)
+nve0_graph_generate_unk88xx(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x408800, 0x2802a3c);
nv_wr32(priv, 0x408804, 0x40);
@@ -2395,7 +2392,7 @@ nve0_graph_generate_unk88xx(struct drm_device *priv)
}
static void
-nve0_graph_generate_gpc(struct drm_device *priv)
+nve0_graph_generate_gpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x418380, 0x16);
nv_wr32(priv, 0x418400, 0x38004e00);
@@ -2521,7 +2518,7 @@ nve0_graph_generate_gpc(struct drm_device *priv)
}
static void
-nve0_graph_generate_tpc(struct drm_device *priv)
+nve0_graph_generate_tpc(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x419848, 0x0);
nv_wr32(priv, 0x419864, 0x129);
@@ -2586,7 +2583,7 @@ nve0_graph_generate_tpc(struct drm_device *priv)
}
static void
-nve0_graph_generate_tpcunk(struct drm_device *priv)
+nve0_graph_generate_tpcunk(struct nvc0_graph_priv *priv)
{
nv_wr32(priv, 0x41be24, 0x6);
nv_wr32(priv, 0x41bec0, 0x12180000);
@@ -2604,9 +2601,8 @@ nve0_graph_generate_tpcunk(struct drm_device *priv)
}
int
-nve0_grctx_generate(struct drm_device *priv)
+nve0_grctx_generate(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *oprv = nv_engine(priv, NVOBJ_ENGINE_GR);
struct nvc0_grctx info;
int ret, i, gpc, tpc, id;
u32 data[6] = {}, data2[2] = {}, tmp;
@@ -2615,7 +2611,7 @@ nve0_grctx_generate(struct drm_device *priv)
u8 tpcnr[GPC_MAX], a, b;
u8 shift, ntpcv;
- ret = nvc0_grctx_init(priv, oprv, &info);
+ ret = nvc0_grctx_init(priv, &info);
if (ret)
return ret;
@@ -2657,17 +2653,17 @@ nve0_grctx_generate(struct drm_device *priv)
mmio_list(0x419848, 0x10000000, 12, 2);
mmio_list(0x405830, 0x02180648, 0, 0);
mmio_list(0x4064c4, 0x0192ffff, 0, 0);
- for (gpc = 0, offset = 0; gpc < oprv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * oprv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * oprv->tpc_nr[gpc];
+ for (gpc = 0, offset = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * oprv->tpc_nr[gpc];
+ offset += 0x0324 * priv->tpc_nr[gpc];
}
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
mmio_list(GPC_UNIT(gpc, 0x30c0), magic[gpc][0], 0, 0);
mmio_list(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset, 0, 0);
- offset += 0x07ff * oprv->tpc_nr[gpc];
+ offset += 0x07ff * priv->tpc_nr[gpc];
}
mmio_list(0x17e91c, 0x06060609, 0, 0);
mmio_list(0x17e920, 0x00090a05, 0, 0);
@@ -2680,22 +2676,22 @@ nve0_grctx_generate(struct drm_device *priv)
nv_wr32(priv, 0x419c00, 0xa);
for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++) {
- if (tpc < oprv->tpc_nr[gpc]) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0698), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x04e8), id);
nv_wr32(priv, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x0088), id++);
}
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), oprv->tpc_nr[gpc]);
- nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), oprv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
}
}
tmp = 0;
- for (i = 0; i < oprv->gpc_nr; i++)
- tmp |= oprv->tpc_nr[i] << (i * 4);
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
nv_wr32(priv, 0x406028, tmp);
nv_wr32(priv, 0x405870, tmp);
@@ -2707,12 +2703,12 @@ nve0_grctx_generate(struct drm_device *priv)
nv_wr32(priv, 0x40587c, 0x0);
/* calculate first set of magics */
- memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
gpc = -1;
- for (tpc = 0; tpc < oprv->tpc_total; tpc++) {
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
do {
- gpc = (gpc + 1) % oprv->gpc_nr;
+ gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
tpcnr[gpc]--;
@@ -2724,7 +2720,7 @@ nve0_grctx_generate(struct drm_device *priv)
/* and the second... */
shift = 0;
- ntpcv = oprv->tpc_total;
+ ntpcv = priv->tpc_total;
while (!(ntpcv & (1 << 4))) {
ntpcv <<= 1;
shift++;
@@ -2733,13 +2729,13 @@ nve0_grctx_generate(struct drm_device *priv)
data2[0] = ntpcv << 16;
data2[0] |= shift << 21;
data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
- data2[0] |= oprv->tpc_total << 8;
- data2[0] |= oprv->magic_not_rop_nr;
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
for (i = 1; i < 7; i++)
data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
/* and write it all the various parts of PGRAPH */
- nv_wr32(priv, 0x418bb8, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr);
+ nv_wr32(priv, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x418b08 + (i * 4), data[i]);
@@ -2748,23 +2744,23 @@ nve0_grctx_generate(struct drm_device *priv)
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x41bf00 + (i * 4), data[i]);
- nv_wr32(priv, 0x4078bc, (oprv->tpc_total << 8) | oprv->magic_not_rop_nr);
+ nv_wr32(priv, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
for (i = 0; i < 6; i++)
nv_wr32(priv, 0x40780c + (i * 4), data[i]);
- memcpy(tpcnr, oprv->tpc_nr, sizeof(oprv->tpc_nr));
- for (gpc = 0; gpc < oprv->gpc_nr; gpc++)
- tpc_mask |= ((1 << oprv->tpc_nr[gpc]) - 1) << (gpc * 8);
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (oprv->tpc_total - 1)) / 32;
+ a = (i * (priv->tpc_total - 1)) / 32;
if (a != b) {
b = a;
do {
- gpc = (gpc + 1) % oprv->gpc_nr;
+ gpc = (gpc + 1) % priv->gpc_nr;
} while (!tpcnr[gpc]);
- tpc = oprv->tpc_nr[gpc] - tpcnr[gpc]--;
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
tpc_set |= 1 << ((gpc * 8) + tpc);
}
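[Editor's note: both the nvc0 and nve0 context generators distribute global TPC indices across GPCs with the same round-robin walk over a shrinking per-GPC counter. Below is a small sketch of that assignment loop, extracted for illustration with invented counts; it is not part of the patch.]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPC_MAX 4  /* illustrative */
#define TPC_MAX 16

/* Assign each global TPC index to a GPC in round-robin order, skipping GPCs
 * whose remaining TPC budget is exhausted -- the same walk the context
 * generators use to build their data[] tables.
 */
static void distribute_tpcs(const uint8_t *tpc_nr, int gpc_nr, int tpc_total,
                            uint8_t *data)
{
        uint8_t tpcnr[GPC_MAX];
        int gpc = -1, tpc;

        memcpy(tpcnr, tpc_nr, gpc_nr);
        for (tpc = 0; tpc < tpc_total; tpc++) {
                do {
                        gpc = (gpc + 1) % gpc_nr;
                } while (!tpcnr[gpc]);
                tpcnr[gpc]--;
                data[tpc] = gpc;
        }
}

int main(void)
{
        uint8_t tpc_nr[GPC_MAX] = { 2, 1, 2, 1 }; /* made-up counts, total 6 */
        uint8_t data[TPC_MAX] = { 0 };
        int i;

        distribute_tpcs(tpc_nr, GPC_MAX, 6, data);
        for (i = 0; i < 6; i++)
                printf("tpc %d -> gpc %d\n", i, data[i]);
        return 0;
}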
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 7f3a275157bb..e5b01899dece 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,19 +22,22 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
-#include "drm.h"
-#include <nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
-
-struct nv04_graph_engine {
- struct nouveau_exec_engine base;
-};
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/namedb.h>
+
+#include <subdev/fb.h>
+#include <subdev/instmem.h>
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
-static uint32_t nv04_graph_ctx_regs[] = {
+#include "regs.h"
+
+static u32
+nv04_graph_ctx_regs[] = {
0x0040053c,
0x00400544,
0x00400540,
@@ -348,205 +351,28 @@ static uint32_t nv04_graph_ctx_regs[] = {
NV04_PGRAPH_DEBUG_3
};
-struct graph_state {
- uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+struct nv04_graph_priv {
+ struct nouveau_graph base;
+ struct nv04_graph_chan *chan[16];
+ spinlock_t lock;
};
-static struct nouveau_channel *
-nv04_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = 15;
-
- if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
- chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
-
- if (chid > 15)
- return NULL;
-
- return dev_priv->channels.ptr[chid];
-}
-
-static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
- if (nv04_graph_ctx_regs[i] == reg)
- return &ctx->nv04[i];
- }
-
- return NULL;
-}
-
-static int
-nv04_graph_load_context(struct nouveau_channel *chan)
-{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
-
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
-
- tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
- nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
-
- return 0;
-}
-
-static int
-nv04_graph_unload_context(struct drm_device *dev)
-{
- struct nouveau_channel *chan = NULL;
- struct graph_state *ctx;
- uint32_t tmp;
- int i;
-
- chan = nv04_graph_channel(dev);
- if (!chan)
- return 0;
- ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
-
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 15 << 24;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
- return 0;
-}
-
-static int
-nv04_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct graph_state *pgraph_ctx;
- NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
-
- pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
- if (pgraph_ctx == NULL)
- return -ENOMEM;
-
- *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
- chan->engctx[engine] = pgraph_ctx;
- return 0;
-}
-
-static void
-nv04_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[engine];
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Unload the context if it's the currently active one */
- if (nv04_graph_channel(dev) == chan)
- nv04_graph_unload_context(dev);
-
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- kfree(pgraph_ctx);
- chan->engctx[engine] = NULL;
-}
-
-int
-nv04_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 1;
- obj->class = class;
-
-#ifdef __BIG_ENDIAN
- nv_wo32(obj, 0x00, 0x00080000 | class);
-#else
- nv_wo32(obj, 0x00, class);
-#endif
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
+struct nv04_graph_chan {
+ struct nouveau_object base;
+ int chid;
+ u32 nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-static int
-nv04_graph_init(struct drm_device *dev, int engine)
+static inline struct nv04_graph_priv *
+nv04_graph_priv(struct nv04_graph_chan *chan)
{
- uint32_t tmp;
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
-
- /* Enable PGRAPH interrupts */
- nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
- nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
- /*1231C000 blob, 001 haiku*/
- /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
- /*0x72111100 blob , 01 haiku*/
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
- /*haiku same*/
-
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
- /*haiku and blob 10d4*/
-
- nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 15 << 24;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
-
- /* These don't belong here, they're part of a per-channel context */
- nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
-
- return 0;
+ return (void *)nv_object(chan)->engine;
}
-static int
-nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv04_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
/*
* Software methods, why they are needed, and how they all work:
@@ -623,37 +449,35 @@ nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
*/
static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
+nv04_graph_set_ctx1(struct nouveau_object *object, u32 mask, u32 value)
{
- struct drm_device *dev = chan->dev;
- u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ int subc = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
u32 tmp;
- tmp = nv_ri32(dev, instance);
+ tmp = nv_ro32(object, 0x00);
tmp &= ~mask;
tmp |= value;
+ nv_wo32(object, 0x00, tmp);
- nv_wi32(dev, instance, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+ nv_wr32(priv, NV04_PGRAPH_CTX_SWITCH1, tmp);
+ nv_wr32(priv, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
}
static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
+nv04_graph_set_ctx_val(struct nouveau_object *object, u32 mask, u32 value)
{
- struct drm_device *dev = chan->dev;
- u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- u32 tmp, ctx1;
int class, op, valid = 1;
+ u32 tmp, ctx1;
- ctx1 = nv_ri32(dev, instance);
+ ctx1 = nv_ro32(object, 0x00);
class = ctx1 & 0xff;
op = (ctx1 >> 15) & 7;
- tmp = nv_ri32(dev, instance + 0xc);
+
+ tmp = nv_ro32(object, 0x0c);
tmp &= ~mask;
tmp |= value;
- nv_wi32(dev, instance + 0xc, tmp);
+ nv_wo32(object, 0x0c, tmp);
/* check for valid surf2d/surf_dst/surf_color */
if (!(tmp & 0x02000000))
@@ -685,30 +509,34 @@ nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
break;
}
- nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+ nv04_graph_set_ctx1(object, 0x01000000, valid << 24);
}
static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_set_operation(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
+ u32 class = nv_ro32(object, 0) & 0xff;
+ u32 data = *(u32 *)args;
if (data > 5)
return 1;
/* Old versions of the objects only accept first three operations. */
if (data > 2 && class < 0x40)
return 1;
- nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ nv04_graph_set_ctx1(object, 0x00038000, data << 15);
/* changing operation changes set of objects needed for validation */
- nv04_graph_set_ctx_val(chan, 0, 0);
+ nv04_graph_set_ctx_val(object, 0, 0);
return 0;
}
static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- uint32_t min = data & 0xffff, max;
- uint32_t w = data >> 16;
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ u32 data = *(u32 *)args;
+ u32 min = data & 0xffff, max;
+ u32 w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
@@ -717,17 +545,19 @@ nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
- nv_wr32(chan->dev, 0x40053c, min);
- nv_wr32(chan->dev, 0x400544, max);
+ nv_wr32(priv, 0x40053c, min);
+ nv_wr32(priv, 0x400544, max);
return 0;
}
static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- uint32_t min = data & 0xffff, max;
- uint32_t w = data >> 16;
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ u32 data = *(u32 *)args;
+ u32 min = data & 0xffff, max;
+ u32 w = data >> 16;
if (min & 0x8000)
/* too large */
return 1;
@@ -736,223 +566,661 @@ nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
w |= 0xffff0000;
max = min + w;
max &= 0x3ffff;
- nv_wr32(chan->dev, 0x400540, min);
- nv_wr32(chan->dev, 0x400548, max);
+ nv_wr32(priv, 0x400540, min);
+ nv_wr32(priv, 0x400548, max);
return 0;
}
+static u16
+nv04_graph_mthd_bind_class(struct nouveau_object *object, u32 *args, u32 size)
+{
+ struct nouveau_instmem *imem = nouveau_instmem(object);
+ u32 inst = *(u32 *)args << 4;
+ return nv_ro32(imem, inst);
+}
+
static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ nv04_graph_set_ctx1(object, 0x00004000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x42:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ nv04_graph_set_ctx1(object, 0x00004000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ nv04_graph_set_ctx1(object, 0x00004000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x42:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ nv04_graph_set_ctx1(object, 0x00004000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
case 0x52:
- nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ nv04_graph_set_ctx1(object, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv01_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ nv04_graph_set_ctx_val(object, 0x08000000, 0);
return 0;
case 0x18:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_patt(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ nv04_graph_set_ctx_val(object, 0x08000000, 0);
return 0;
case 0x44:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ nv04_graph_set_ctx_val(object, 0x08000000, 0x08000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_rop(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ nv04_graph_set_ctx_val(object, 0x10000000, 0);
return 0;
case 0x43:
- nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ nv04_graph_set_ctx_val(object, 0x10000000, 0x10000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_beta1(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ nv04_graph_set_ctx_val(object, 0x20000000, 0);
return 0;
case 0x12:
- nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ nv04_graph_set_ctx_val(object, 0x20000000, 0x20000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_beta4(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ nv04_graph_set_ctx_val(object, 0x40000000, 0);
return 0;
case 0x72:
- nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ nv04_graph_set_ctx_val(object, 0x40000000, 0x40000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x58:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ nv04_graph_set_ctx_val(object, 0x04000000, 0);
return 0;
case 0x59:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0);
return 0;
case 0x5a:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ nv04_graph_set_ctx_val(object, 0x02000000, 0x02000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ nv04_graph_set_ctx_val(object, 0x04000000, 0);
return 0;
case 0x5b:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ nv04_graph_set_ctx_val(object, 0x04000000, 0x04000000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv01_graph_mthd_bind_clip(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(chan, 0x2000, 0);
+ nv04_graph_set_ctx1(object, 0x2000, 0);
return 0;
case 0x19:
- nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ nv04_graph_set_ctx1(object, 0x2000, 0x2000);
return 0;
}
return 1;
}
static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ switch (nv04_graph_mthd_bind_class(object, args, size)) {
case 0x30:
- nv04_graph_set_ctx1(chan, 0x1000, 0);
+ nv04_graph_set_ctx1(object, 0x1000, 0);
return 0;
/* Yes, for some reason even the old versions of objects
* accept 0x57 and not 0x17. Consistency be damned.
*/
case 0x57:
- nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ nv04_graph_set_ctx1(object, 0x1000, 0x1000);
return 0;
}
return 1;
}
-static struct nouveau_bitfield nv04_graph_intr[] = {
+static struct nouveau_omthds
+nv03_graph_gdi_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_patt },
+ { 0x0188, nv04_graph_mthd_bind_rop },
+ { 0x018c, nv04_graph_mthd_bind_beta1 },
+ { 0x0190, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_gdi_omthds[] = {
+ { 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv01_graph_blit_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x019c, nv04_graph_mthd_bind_surf_src },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_blit_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_iifc_omthds[] = {
+ { 0x0188, nv01_graph_mthd_bind_chroma },
+ { 0x018c, nv01_graph_mthd_bind_clip },
+ { 0x0190, nv04_graph_mthd_bind_patt },
+ { 0x0194, nv04_graph_mthd_bind_rop },
+ { 0x0198, nv04_graph_mthd_bind_beta1 },
+ { 0x019c, nv04_graph_mthd_bind_beta4 },
+ { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
+ { 0x03e4, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv01_graph_ifc_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, nv01_graph_mthd_bind_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_ifc_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_patt },
+ { 0x0190, nv04_graph_mthd_bind_rop },
+ { 0x0194, nv04_graph_mthd_bind_beta1 },
+ { 0x0198, nv04_graph_mthd_bind_beta4 },
+ { 0x019c, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifc_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifc_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_chroma },
+ { 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv03_graph_sifm_omthds[] = {
+ { 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_sifm_omthds[] = {
+ { 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x0304, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_surf3d_omthds[] = {
+ { 0x02f8, nv04_graph_mthd_surf3d_clip_h },
+ { 0x02fc, nv04_graph_mthd_surf3d_clip_v },
+ {}
+};
+
+static struct nouveau_omthds
+nv03_graph_ttri_omthds[] = {
+ { 0x0188, nv01_graph_mthd_bind_clip },
+ { 0x018c, nv04_graph_mthd_bind_surf_color },
+ { 0x0190, nv04_graph_mthd_bind_surf_zeta },
+ {}
+};
+
+static struct nouveau_omthds
+nv01_graph_prim_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, nv01_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_surf_dst },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static struct nouveau_omthds
+nv04_graph_prim_omthds[] = {
+ { 0x0184, nv01_graph_mthd_bind_clip },
+ { 0x0188, nv04_graph_mthd_bind_patt },
+ { 0x018c, nv04_graph_mthd_bind_rop },
+ { 0x0190, nv04_graph_mthd_bind_beta1 },
+ { 0x0194, nv04_graph_mthd_bind_beta4 },
+ { 0x0198, nv04_graph_mthd_bind_surf2d },
+ { 0x02fc, nv04_graph_mthd_set_operation },
+ {}
+};
+
+static int
+nv04_graph_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj *obj;
+ int ret;
+
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
+ if (ret)
+ return ret;
+
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+#ifdef __BIG_ENDIAN
+ nv_mo32(obj, 0x00, 0x00080000, 0x00080000);
+#endif
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+ return 0;
+}
+
+struct nouveau_ofuncs
+nv04_graph_ofuncs = {
+ .ctor = nv04_graph_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
+
+static struct nouveau_oclass
+nv04_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+ { 0x0017, &nv04_graph_ofuncs }, /* chroma */
+ { 0x0018, &nv04_graph_ofuncs }, /* pattern (nv01) */
+ { 0x0019, &nv04_graph_ofuncs }, /* clip */
+ { 0x001c, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* line */
+ { 0x001d, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* tri */
+ { 0x001e, &nv04_graph_ofuncs, nv01_graph_prim_omthds }, /* rect */
+ { 0x001f, &nv04_graph_ofuncs, nv01_graph_blit_omthds },
+ { 0x0021, &nv04_graph_ofuncs, nv01_graph_ifc_omthds },
+ { 0x0030, &nv04_graph_ofuncs }, /* null */
+ { 0x0036, &nv04_graph_ofuncs, nv03_graph_sifc_omthds },
+ { 0x0037, &nv04_graph_ofuncs, nv03_graph_sifm_omthds },
+ { 0x0038, &nv04_graph_ofuncs }, /* dvd subpicture */
+ { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+ { 0x0042, &nv04_graph_ofuncs }, /* surf2d */
+ { 0x0043, &nv04_graph_ofuncs }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs }, /* pattern */
+ { 0x0048, &nv04_graph_ofuncs, nv03_graph_ttri_omthds },
+ { 0x004a, &nv04_graph_ofuncs, nv04_graph_gdi_omthds },
+ { 0x004b, &nv04_graph_ofuncs, nv03_graph_gdi_omthds },
+ { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+ { 0x0053, &nv04_graph_ofuncs, nv04_graph_surf3d_omthds },
+ { 0x0054, &nv04_graph_ofuncs }, /* ttri */
+ { 0x0055, &nv04_graph_ofuncs }, /* mtri */
+ { 0x0057, &nv04_graph_ofuncs }, /* chroma */
+ { 0x0058, &nv04_graph_ofuncs }, /* surf_dst */
+ { 0x0059, &nv04_graph_ofuncs }, /* surf_src */
+ { 0x005a, &nv04_graph_ofuncs }, /* surf_color */
+ { 0x005b, &nv04_graph_ofuncs }, /* surf_zeta */
+ { 0x005c, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* line */
+ { 0x005d, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* tri */
+ { 0x005e, &nv04_graph_ofuncs, nv04_graph_prim_omthds }, /* rect */
+ { 0x005f, &nv04_graph_ofuncs, nv04_graph_blit_omthds },
+ { 0x0060, &nv04_graph_ofuncs, nv04_graph_iifc_omthds },
+ { 0x0061, &nv04_graph_ofuncs, nv04_graph_ifc_omthds },
+ { 0x0064, &nv04_graph_ofuncs }, /* iifc (nv05) */
+ { 0x0065, &nv04_graph_ofuncs }, /* ifc (nv05) */
+ { 0x0066, &nv04_graph_ofuncs }, /* sifc (nv05) */
+ { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+ { 0x0076, &nv04_graph_ofuncs, nv04_graph_sifc_omthds },
+ { 0x0077, &nv04_graph_ofuncs, nv04_graph_sifm_omthds },
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv04_graph_chan *
+nv04_graph_channel(struct nv04_graph_priv *priv)
+{
+ struct nv04_graph_chan *chan = NULL;
+ if (nv_rd32(priv, NV04_PGRAPH_CTX_CONTROL) & 0x00010000) {
+ int chid = nv_rd32(priv, NV04_PGRAPH_CTX_USER) >> 24;
+ if (chid < ARRAY_SIZE(priv->chan))
+ chan = priv->chan[chid];
+ }
+ return chan;
+}
+
+static int
+nv04_graph_load_context(struct nv04_graph_chan *chan, int chid)
+{
+ struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ nv_wr32(priv, nv04_graph_ctx_regs[i], chan->nv04[i]);
+
+ nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+ nv_mask(priv, NV04_PGRAPH_FFINTFC_ST2, 0xfff00000, 0x00000000);
+ return 0;
+}
+
+static int
+nv04_graph_unload_context(struct nv04_graph_chan *chan)
+{
+ struct nv04_graph_priv *priv = nv04_graph_priv(chan);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ chan->nv04[i] = nv_rd32(priv, nv04_graph_ctx_regs[i]);
+
+ nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+ return 0;
+}
+
+static void
+nv04_graph_context_switch(struct nv04_graph_priv *priv)
+{
+ struct nv04_graph_chan *prev = NULL;
+ struct nv04_graph_chan *next = NULL;
+ unsigned long flags;
+ int chid;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ nv04_graph_idle(priv);
+
+ /* If previous context is valid, we need to save it */
+ prev = nv04_graph_channel(priv);
+ if (prev)
+ nv04_graph_unload_context(prev);
+
+ /* load context for next channel */
+ chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0x0f;
+ next = priv->chan[chid];
+ if (next)
+ nv04_graph_load_context(next, chid);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static u32 *ctx_reg(struct nv04_graph_chan *chan, u32 reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+ if (nv04_graph_ctx_regs[i] == reg)
+ return &chan->nv04[i];
+ }
+
+ return NULL;
+}
+
+static int
+nv04_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_fifo_chan *fifo = (void *)parent;
+ struct nv04_graph_priv *priv = (void *)engine;
+ struct nv04_graph_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->chan[fifo->chid]) {
+ *pobject = nv_object(priv->chan[fifo->chid]);
+ atomic_inc(&(*pobject)->refcount);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ nouveau_object_destroy(&chan->base);
+ return 1;
+ }
+
+ *ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+ priv->chan[fifo->chid] = chan;
+ chan->chid = fifo->chid;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return 0;
+}
+
+static void
+nv04_graph_context_dtor(struct nouveau_object *object)
+{
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_graph_chan *chan = (void *)object;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->chan[chan->chid] = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ nouveau_object_destroy(&chan->base);
+}
+
+static int
+nv04_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_graph_chan *chan = (void *)object;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (nv04_graph_channel(priv) == chan)
+ nv04_graph_unload_context(chan);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return nouveau_object_fini(&chan->base, suspend);
+}
+
+static struct nouveau_oclass
+nv04_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_graph_context_ctor,
+ .dtor = nv04_graph_context_dtor,
+ .init = nouveau_object_init,
+ .fini = nv04_graph_context_fini,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+bool
+nv04_graph_idle(void *obj)
+{
+ struct nouveau_graph *graph = nouveau_graph(obj);
+ u32 mask = 0xffffffff;
+
+ if (nv_device(obj)->card_type == NV_40)
+ mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+ if (!nv_wait(graph, NV04_PGRAPH_STATUS, mask, 0)) {
+ nv_error(graph, "idle timed out with status 0x%08x\n",
+ nv_rd32(graph, NV04_PGRAPH_STATUS));
+ return false;
+ }
+
+ return true;
+}
+
+static struct nouveau_bitfield
+nv04_graph_intr_name[] = {
{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
{}
};
-static struct nouveau_bitfield nv04_graph_nstatus[] = {
+static struct nouveau_bitfield
+nv04_graph_nstatus[] = {
{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
@@ -960,7 +1228,8 @@ static struct nouveau_bitfield nv04_graph_nstatus[] = {
{}
};
-struct nouveau_bitfield nv04_graph_nsource[] = {
+struct nouveau_bitfield
+nv04_graph_nsource[] = {
{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
@@ -984,343 +1253,135 @@ struct nouveau_bitfield nv04_graph_nsource[] = {
};
static void
-nv04_graph_context_switch(struct drm_device *dev)
+nv04_graph_intr(struct nouveau_subdev *subdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- int chid;
-
- nouveau_wait_for_idle(dev);
-
- /* If previous context is valid, we need to save it */
- nv04_graph_unload_context(dev);
+ struct nv04_graph_priv *priv = (void *)subdev;
+ struct nv04_graph_chan *chan = NULL;
+ struct nouveau_namedb *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+ u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+ u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x0f000000) >> 24;
+ u32 subc = (addr & 0x0000e000) >> 13;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(priv, 0x400180 + subc * 4) & 0xff;
+ u32 inst = (nv_rd32(priv, 0x40016c) & 0xffff) << 4;
+ u32 show = stat;
+ unsigned long flags;
- /* Load context for next channel */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
- chan = dev_priv->channels.ptr[chid];
+ spin_lock_irqsave(&priv->lock, flags);
+ chan = priv->chan[chid];
if (chan)
- nv04_graph_load_context(chan);
-}
-
-static void
-nv04_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x0f000000) >> 24;
- u32 subc = (addr & 0x0000e000) >> 13;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_NOTIFY) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_NOTIFY;
- }
+ namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (stat & NV_PGRAPH_INTR_NOTIFY) {
+ if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+ handle = nouveau_namedb_get_vinst(namedb, inst);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~NV_PGRAPH_INTR_NOTIFY;
}
+ }
- if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv04_graph_context_switch(dev);
- }
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv04_graph_context_switch(priv);
+ }
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv04_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
+ nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+ nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show) {
+ nv_error(priv, "");
+ nouveau_bitfield_print(nv04_graph_intr_name, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ printk("\n");
+ nv_error(priv, "ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
}
+
+ nouveau_namedb_put(handle);
}
-static void
-nv04_graph_destroy(struct drm_device *dev, int engine)
+static int
+nv04_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
+ struct nv04_graph_priv *priv;
+ int ret;
- nouveau_irq_unregister(dev, 12);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv04_graph_intr;
+ nv_engine(priv)->cclass = &nv04_graph_cclass;
+ nv_engine(priv)->sclass = nv04_graph_sclass;
+ spin_lock_init(&priv->lock);
+ return 0;
}
-int
-nv04_graph_create(struct drm_device *dev)
+static int
+nv04_graph_init(struct nouveau_object *object)
{
- struct nv04_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv04_graph_destroy;
- pgraph->base.init = nv04_graph_init;
- pgraph->base.fini = nv04_graph_fini;
- pgraph->base.context_new = nv04_graph_context_new;
- pgraph->base.context_del = nv04_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv04_graph_isr);
-
- /* dvd subpicture */
- NVOBJ_CLASS(dev, 0x0038, GR);
-
- /* m2mf */
- NVOBJ_CLASS(dev, 0x0039, GR);
-
- /* nv03 gdirect */
- NVOBJ_CLASS(dev, 0x004b, GR);
- NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 gdirect */
- NVOBJ_CLASS(dev, 0x004a, GR);
- NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 imageblit */
- NVOBJ_CLASS(dev, 0x001f, GR);
- NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
- NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 imageblit */
- NVOBJ_CLASS(dev, 0x005f, GR);
- NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 iifc */
- NVOBJ_CLASS(dev, 0x0060, GR);
- NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
- NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
-
- /* nv05 iifc */
- NVOBJ_CLASS(dev, 0x0064, GR);
-
- /* nv01 ifc */
- NVOBJ_CLASS(dev, 0x0021, GR);
- NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 ifc */
- NVOBJ_CLASS(dev, 0x0061, GR);
- NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv05 ifc */
- NVOBJ_CLASS(dev, 0x0065, GR);
-
- /* nv03 sifc */
- NVOBJ_CLASS(dev, 0x0036, GR);
- NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 sifc */
- NVOBJ_CLASS(dev, 0x0076, GR);
- NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv05 sifc */
- NVOBJ_CLASS(dev, 0x0066, GR);
-
- /* nv03 sifm */
- NVOBJ_CLASS(dev, 0x0037, GR);
- NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
-
- /* nv04 sifm */
- NVOBJ_CLASS(dev, 0x0077, GR);
- NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
- NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
-
- /* null */
- NVOBJ_CLASS(dev, 0x0030, GR);
-
- /* surf2d */
- NVOBJ_CLASS(dev, 0x0042, GR);
-
- /* rop */
- NVOBJ_CLASS(dev, 0x0043, GR);
-
- /* beta1 */
- NVOBJ_CLASS(dev, 0x0012, GR);
-
- /* beta4 */
- NVOBJ_CLASS(dev, 0x0072, GR);
-
- /* cliprect */
- NVOBJ_CLASS(dev, 0x0019, GR);
-
- /* nv01 pattern */
- NVOBJ_CLASS(dev, 0x0018, GR);
-
- /* nv04 pattern */
- NVOBJ_CLASS(dev, 0x0044, GR);
-
- /* swzsurf */
- NVOBJ_CLASS(dev, 0x0052, GR);
-
- /* surf3d */
- NVOBJ_CLASS(dev, 0x0053, GR);
- NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
- NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
-
- /* nv03 tex_tri */
- NVOBJ_CLASS(dev, 0x0048, GR);
- NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
- NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
-
- /* tex_tri */
- NVOBJ_CLASS(dev, 0x0054, GR);
-
- /* multitex_tri */
- NVOBJ_CLASS(dev, 0x0055, GR);
-
- /* nv01 chroma */
- NVOBJ_CLASS(dev, 0x0017, GR);
-
- /* nv04 chroma */
- NVOBJ_CLASS(dev, 0x0057, GR);
-
- /* surf_dst */
- NVOBJ_CLASS(dev, 0x0058, GR);
-
- /* surf_src */
- NVOBJ_CLASS(dev, 0x0059, GR);
-
- /* surf_color */
- NVOBJ_CLASS(dev, 0x005a, GR);
-
- /* surf_zeta */
- NVOBJ_CLASS(dev, 0x005b, GR);
-
- /* nv01 line */
- NVOBJ_CLASS(dev, 0x001c, GR);
- NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 line */
- NVOBJ_CLASS(dev, 0x005c, GR);
- NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 tri */
- NVOBJ_CLASS(dev, 0x001d, GR);
- NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 tri */
- NVOBJ_CLASS(dev, 0x005d, GR);
- NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 rect */
- NVOBJ_CLASS(dev, 0x001e, GR);
- NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 rect */
- NVOBJ_CLASS(dev, 0x005e, GR);
- NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv04_graph_priv *priv = (void *)engine;
+ int ret;
+
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
+ /* Enable PGRAPH interrupts */
+ nv_wr32(priv, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(priv, NV04_PGRAPH_VALID1, 0);
+ nv_wr32(priv, NV04_PGRAPH_VALID2, 0);
+ /*nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+ /*1231C000 blob, 001 haiku*/
+ /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x72111100);
+ /*0x72111100 blob , 01 haiku*/
+ /*nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+ /*haiku same*/
+
+ /*nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+ /*haiku and blob 10d4*/
+
+ nv_wr32(priv, NV04_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(priv, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
+ nv_mask(priv, NV04_PGRAPH_CTX_USER, 0xff000000, 0x0f000000);
+
+ /* These don't belong here, they're part of a per-channel context */
+ nv_wr32(priv, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+ nv_wr32(priv, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
return 0;
}
+
+struct nouveau_oclass
+nv04_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_graph_ctor,
+ .dtor = _nouveau_graph_dtor,
+ .init = nv04_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index d006658e6468..ce38196634df 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,27 +22,28 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
-#include "drm.h"
-#include <nouveau_drm.h>
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-
-struct nv10_graph_engine {
- struct nouveau_exec_engine base;
-};
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+
+#include <subdev/fb.h>
+
+#include <engine/fifo.h>
+#include <engine/graph.h>
+
+#include "regs.h"
struct pipe_state {
- uint32_t pipe_0x0000[0x040/4];
- uint32_t pipe_0x0040[0x010/4];
- uint32_t pipe_0x0200[0x0c0/4];
- uint32_t pipe_0x4400[0x080/4];
- uint32_t pipe_0x6400[0x3b0/4];
- uint32_t pipe_0x6800[0x2f0/4];
- uint32_t pipe_0x6c00[0x030/4];
- uint32_t pipe_0x7000[0x130/4];
- uint32_t pipe_0x7400[0x0c0/4];
- uint32_t pipe_0x7800[0x0c0/4];
+ u32 pipe_0x0000[0x040/4];
+ u32 pipe_0x0040[0x010/4];
+ u32 pipe_0x0200[0x0c0/4];
+ u32 pipe_0x4400[0x080/4];
+ u32 pipe_0x6400[0x3b0/4];
+ u32 pipe_0x6800[0x2f0/4];
+ u32 pipe_0x6c00[0x030/4];
+ u32 pipe_0x7000[0x130/4];
+ u32 pipe_0x7400[0x0c0/4];
+ u32 pipe_0x7800[0x0c0/4];
};
static int nv10_graph_ctx_regs[] = {
@@ -388,117 +389,322 @@ static int nv17_graph_ctx_regs[] = {
0x00400a04,
};
-struct graph_state {
+struct nv10_graph_priv {
+ struct nouveau_graph base;
+ struct nv10_graph_chan *chan[32];
+ spinlock_t lock;
+};
+
+struct nv10_graph_chan {
+ struct nouveau_object base;
+ int chid;
int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
struct pipe_state pipe_state;
- uint32_t lma_window[4];
+ u32 lma_window[4];
};
-#define PIPE_SAVE(dev, state, addr) \
+
+static inline struct nv10_graph_priv *
+nv10_graph_priv(struct nv10_graph_chan *chan)
+{
+ return (void *)nv_object(chan)->engine;
+}
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+#define PIPE_SAVE(priv, state, addr) \
do { \
int __i; \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ state[__i] = nv_rd32(priv, NV10_PGRAPH_PIPE_DATA); \
} while (0)
-#define PIPE_RESTORE(dev, state, addr) \
+#define PIPE_RESTORE(priv, state, addr) \
do { \
int __i; \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, addr); \
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, state[__i]); \
} while (0)
-static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+static struct nouveau_oclass
+nv10_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs }, /* null */
+ { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs }, /* pattern */
+ { 0x004a, &nv04_graph_ofuncs }, /* gdi */
+ { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_graph_ofuncs }, /* ttri */
+ { 0x0095, &nv04_graph_ofuncs }, /* mtri */
+ { 0x0056, &nv04_graph_ofuncs }, /* celcius */
+ {},
+};
+
+static struct nouveau_oclass
+nv15_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs }, /* null */
+ { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs }, /* pattern */
+ { 0x004a, &nv04_graph_ofuncs }, /* gdi */
+ { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_graph_ofuncs }, /* ttri */
+ { 0x0095, &nv04_graph_ofuncs }, /* mtri */
+ { 0x0096, &nv04_graph_ofuncs }, /* celcius */
+ {},
+};
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv10_graph_chan *chan = (void *)object->parent;
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct pipe_state *pipe = &chan->pipe_state;
+ u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+ u32 xfmode0, xfmode1;
+ u32 data = *(u32 *)args;
+ int i;
+
+ chan->lma_window[(mthd - 0x1638) / 4] = data;
+
+ if (mthd != 0x1644)
+ return 0;
+
+ nv04_graph_idle(priv);
+
+ PIPE_SAVE(priv, pipe_0x0040, 0x0040);
+ PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+
+ PIPE_RESTORE(priv, chan->lma_window, 0x6790);
+
+ nv04_graph_idle(priv);
+
+ xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+
+ PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(priv, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(priv, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(priv, pipe_0x6a80, 0x6a80);
+
+ nv04_graph_idle(priv);
+
+ nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+
+ nv04_graph_idle(priv);
+
+ PIPE_RESTORE(priv, pipe_0x0040, 0x0040);
+
+ nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+
+ PIPE_RESTORE(priv, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(priv, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(priv, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv04_graph_idle(priv);
+
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *pipe = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
-
- PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
- PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
- PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
- PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
- PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
- PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
- PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
- PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
- PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
+ struct nv10_graph_chan *chan = (void *)object->parent;
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+
+ nv04_graph_idle(priv);
+
+ nv_mask(priv, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
+ nv_mask(priv, 0x4006b0, 0x08000000, 0x08000000);
+ return 0;
}
-static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+static struct nouveau_omthds
+nv17_celcius_omthds[] = {
+ { 0x1638, nv17_graph_mthd_lma_window },
+ { 0x163c, nv17_graph_mthd_lma_window },
+ { 0x1640, nv17_graph_mthd_lma_window },
+ { 0x1644, nv17_graph_mthd_lma_window },
+ { 0x1658, nv17_graph_mthd_lma_enable },
+ {}
+};
+
+static struct nouveau_oclass
+nv17_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs }, /* null */
+ { 0x0039, &nv04_graph_ofuncs }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs }, /* pattern */
+ { 0x004a, &nv04_graph_ofuncs }, /* gdi */
+ { 0x0052, &nv04_graph_ofuncs }, /* swzsurf */
+ { 0x005f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0062, &nv04_graph_ofuncs }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs }, /* blit */
+ { 0x0093, &nv04_graph_ofuncs }, /* surf3d */
+ { 0x0094, &nv04_graph_ofuncs }, /* ttri */
+ { 0x0095, &nv04_graph_ofuncs }, /* mtri */
+ { 0x0099, &nv04_graph_ofuncs, nv17_celcius_omthds },
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nv10_graph_chan *
+nv10_graph_channel(struct nv10_graph_priv *priv)
{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *pipe = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
- uint32_t xfmode0, xfmode1;
+ struct nv10_graph_chan *chan = NULL;
+ if (nv_rd32(priv, 0x400144) & 0x00010000) {
+ int chid = nv_rd32(priv, 0x400148) >> 24;
+ if (chid < ARRAY_SIZE(priv->chan))
+ chan = priv->chan[chid];
+ }
+ return chan;
+}
+
+static void
+nv10_graph_save_pipe(struct nv10_graph_chan *chan)
+{
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct pipe_state *pipe = &chan->pipe_state;
+
+ PIPE_SAVE(priv, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(priv, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(priv, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(priv, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(priv, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(priv, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(priv, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(priv, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(priv, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(priv, pipe->pipe_0x0000, 0x0000);
+}
+
+static void
+nv10_graph_load_pipe(struct nv10_graph_chan *chan)
+{
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct pipe_state *pipe = &chan->pipe_state;
+ u32 xfmode0, xfmode1;
int i;
- nouveau_wait_for_idle(dev);
+ nv04_graph_idle(priv);
/* XXX check haiku comments */
- xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ xfmode0 = nv_rd32(priv, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(priv, NV10_PGRAPH_XFMODE1);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(priv, NV10_PGRAPH_PIPE_DATA, 0x00000008);
- PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
- nouveau_wait_for_idle(dev);
+ PIPE_RESTORE(priv, pipe->pipe_0x0200, 0x0200);
+ nv04_graph_idle(priv);
/* restore XFMODE */
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
- PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
- PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
- PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
- PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
- PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
- PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
- PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
- PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
- nouveau_wait_for_idle(dev);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(priv, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(priv, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(priv, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(priv, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(priv, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(priv, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(priv, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(priv, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(priv, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(priv, pipe->pipe_0x0040, 0x0040);
+ nv04_graph_idle(priv);
}
-static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+static void
+nv10_graph_create_pipe(struct nv10_graph_chan *chan)
{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
- uint32_t *fifo_pipe_state_addr;
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ struct pipe_state *pipe_state = &chan->pipe_state;
+ u32 *pipe_state_addr;
int i;
#define PIPE_INIT(addr) \
do { \
- fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ pipe_state_addr = pipe_state->pipe_##addr; \
} while (0)
#define PIPE_INIT_END(addr) \
do { \
- uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
- ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
- if (fifo_pipe_state_addr != __end_addr) \
- NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
- addr, fifo_pipe_state_addr, __end_addr); \
+ u32 *__end_addr = pipe_state->pipe_##addr + \
+ ARRAY_SIZE(pipe_state->pipe_##addr); \
+ if (pipe_state_addr != __end_addr) \
+ nv_error(priv, "incomplete pipe init for 0x%x : %p/%p\n", \
+ addr, pipe_state_addr, __end_addr); \
} while (0)
-#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+#define NV_WRITE_PIPE_INIT(value) *(pipe_state_addr++) = value
PIPE_INIT(0x0200);
for (i = 0; i < 48; i++)
@@ -634,34 +840,36 @@ static void nv10_graph_create_pipe(struct nouveau_channel *chan)
#undef NV_WRITE_PIPE_INIT
}
-static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+static int
+nv10_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
if (nv10_graph_ctx_regs[i] == reg)
return i;
}
- NV_ERROR(dev, "unknow offset nv10_ctx_regs %d\n", reg);
+ nv_error(priv, "unknow offset nv10_ctx_regs %d\n", reg);
return -1;
}
-static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+static int
+nv17_graph_ctx_regs_find_offset(struct nv10_graph_priv *priv, int reg)
{
int i;
for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
if (nv17_graph_ctx_regs[i] == reg)
return i;
}
- NV_ERROR(dev, "unknow offset nv17_ctx_regs %d\n", reg);
+ nv_error(priv, "unknow offset nv17_ctx_regs %d\n", reg);
return -1;
}
-static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
- uint32_t inst)
+static void
+nv10_graph_load_dma_vtxbuf(struct nv10_graph_chan *chan, int chid, u32 inst)
{
- struct drm_device *dev = chan->dev;
- uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
- uint32_t ctx_user, ctx_switch[5];
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+ u32 ctx_user, ctx_switch[5];
int i, subchan = -1;
/* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
@@ -671,7 +879,7 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
/* Look for a celsius object */
for (i = 0; i < 8; i++) {
- int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+ int class = nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
if (class == 0x56 || class == 0x96 || class == 0x99) {
subchan = i;
@@ -683,168 +891,158 @@ static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
return;
/* Save the current ctx object */
- ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ ctx_user = nv_rd32(priv, NV10_PGRAPH_CTX_USER);
for (i = 0; i < 5; i++)
- ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
+ ctx_switch[i] = nv_rd32(priv, NV10_PGRAPH_CTX_SWITCH(i));
/* Save the FIFO state */
- st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
- st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
- st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
- fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+ st2 = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2);
+ st2_dl = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DL);
+ st2_dh = nv_rd32(priv, NV10_PGRAPH_FFINTFC_ST2_DH);
+ fifo_ptr = nv_rd32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR);
for (i = 0; i < ARRAY_SIZE(fifo); i++)
- fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
+ fifo[i] = nv_rd32(priv, 0x4007a0 + 4 * i);
/* Switch to the celsius subchannel */
for (i = 0; i < 5; i++)
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
- nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
- nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i),
+ nv_rd32(priv, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+ nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
/* Inject NV10TCL_DMA_VTXBUF */
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
- 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
- nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2,
+ 0x2c000000 | chid << 20 | subchan << 16 | 0x18c);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+ nv_mask(priv, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
/* Restore the FIFO state */
for (i = 0; i < ARRAY_SIZE(fifo); i++)
- nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
+ nv_wr32(priv, 0x4007a0 + 4 * i, fifo[i]);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, st2);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
/* Restore the current ctx object */
for (i = 0; i < 5; i++)
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+ nv_wr32(priv, NV10_PGRAPH_CTX_USER, ctx_user);
}
static int
-nv10_graph_load_context(struct nouveau_channel *chan)
+nv10_graph_load_context(struct nv10_graph_chan *chan, int chid)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- uint32_t tmp;
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
+ u32 inst;
int i;
for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
- if (dev_priv->chipset >= 0x17) {
+ nv_wr32(priv, nv10_graph_ctx_regs[i], chan->nv10[i]);
+
+ if (nv_device(priv)->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- nv_wr32(dev, nv17_graph_ctx_regs[i],
- pgraph_ctx->nv17[i]);
+ nv_wr32(priv, nv17_graph_ctx_regs[i], chan->nv17[i]);
}
nv10_graph_load_pipe(chan);
- nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
- & 0xffff));
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
- tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+
+ inst = nv_rd32(priv, NV10_PGRAPH_GLOBALSTATE1) & 0xffff;
+ nv10_graph_load_dma_vtxbuf(chan, chid, inst);
+
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, chid << 24);
+ nv_mask(priv, NV10_PGRAPH_FFINTFC_ST2, 0x30000000, 0x00000000);
return 0;
}
static int
-nv10_graph_unload_context(struct drm_device *dev)
+nv10_graph_unload_context(struct nv10_graph_chan *chan)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- struct graph_state *ctx;
- uint32_t tmp;
+ struct nv10_graph_priv *priv = nv10_graph_priv(chan);
int i;
- chan = nv10_graph_channel(dev);
- if (!chan)
- return 0;
- ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+ chan->nv10[i] = nv_rd32(priv, nv10_graph_ctx_regs[i]);
- if (dev_priv->chipset >= 0x17) {
+ if (nv_device(priv)->chipset >= 0x17) {
for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+ chan->nv17[i] = nv_rd32(priv, nv17_graph_ctx_regs[i]);
}
nv10_graph_save_pipe(chan);
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
return 0;
}
static void
-nv10_graph_context_switch(struct drm_device *dev)
+nv10_graph_context_switch(struct nv10_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
+ struct nv10_graph_chan *prev = NULL;
+ struct nv10_graph_chan *next = NULL;
+ unsigned long flags;
int chid;
- nouveau_wait_for_idle(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ nv04_graph_idle(priv);
/* If previous context is valid, we need to save it */
- nv10_graph_unload_context(dev);
+ prev = nv10_graph_channel(priv);
+ if (prev)
+ nv10_graph_unload_context(prev);
+
+ /* load context for next channel */
+ chid = (nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ next = priv->chan[chid];
+ if (next)
+ nv10_graph_load_context(next, chid);
- /* Load context for next channel */
- chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- chan = dev_priv->channels.ptr[chid];
- if (chan && chan->engctx[NVOBJ_ENGINE_GR])
- nv10_graph_load_context(chan);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
#define NV_WRITE_CTX(reg, val) do { \
- int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+ int offset = nv10_graph_ctx_regs_find_offset(priv, reg); \
if (offset > 0) \
- pgraph_ctx->nv10[offset] = val; \
+ chan->nv10[offset] = val; \
} while (0)
#define NV17_WRITE_CTX(reg, val) do { \
- int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+ int offset = nv17_graph_ctx_regs_find_offset(priv, reg); \
if (offset > 0) \
- pgraph_ctx->nv17[offset] = val; \
+ chan->nv17[offset] = val; \
} while (0)
-struct nouveau_channel *
-nv10_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = 31;
-
- if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
- chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
-
- if (chid >= 31)
- return NULL;
-
- return dev_priv->channels.ptr[chid];
-}
-
static int
-nv10_graph_context_new(struct nouveau_channel *chan, int engine)
+nv10_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx;
-
- NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
-
- pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
- if (pgraph_ctx == NULL)
- return -ENOMEM;
- chan->engctx[engine] = pgraph_ctx;
+ struct nouveau_fifo_chan *fifo = (void *)parent;
+ struct nv10_graph_priv *priv = (void *)engine;
+ struct nv10_graph_chan *chan;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->chan[fifo->chid]) {
+ *pobject = nv_object(priv->chan[fifo->chid]);
+ atomic_inc(&(*pobject)->refcount);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ nouveau_object_destroy(&chan->base);
+ return 1;
+ }
NV_WRITE_CTX(0x00400e88, 0x08000000);
NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@@ -853,212 +1051,91 @@ nv10_graph_context_new(struct nouveau_channel *chan, int engine)
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
- if (dev_priv->chipset >= 0x17) {
+ if (nv_device(priv)->chipset >= 0x17) {
/* is it really needed ??? */
NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
- nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
- NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+ nv_rd32(priv, NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, nv_rd32(priv, 0x004006b0));
NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
NV17_WRITE_CTX(0x00400ec0, 0x00000080);
NV17_WRITE_CTX(0x00400ed0, 0x00000080);
}
- NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->chid << 24);
nv10_graph_create_pipe(chan);
+
+ priv->chan[fifo->chid] = chan;
+ chan->chid = fifo->chid;
+ spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static void
-nv10_graph_context_del(struct nouveau_channel *chan, int engine)
+nv10_graph_context_dtor(struct nouveau_object *object)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[engine];
+ struct nv10_graph_priv *priv = (void *)object->engine;
+ struct nv10_graph_chan *chan = (void *)object;
unsigned long flags;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Unload the context if it's the currently active one */
- if (nv10_graph_channel(dev) == chan)
- nv10_graph_unload_context(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->chan[chan->chid] = NULL;
+ spin_unlock_irqrestore(&priv->lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- chan->engctx[engine] = NULL;
- kfree(pgraph_ctx);
-}
-
-static void
-nv10_graph_set_tile_region(struct drm_device *dev, int i)
-{
- struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
+ nouveau_object_destroy(&chan->base);
}
static int
-nv10_graph_init(struct drm_device *dev, int engine)
+nv10_graph_context_fini(struct nouveau_object *object, bool suspend)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 tmp;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
- /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
- nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
- (1<<29) |
- (1<<31));
- if (dev_priv->chipset >= 0x17) {
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
- nv_wr32(dev, 0x400a10, 0x3ff3fb6);
- nv_wr32(dev, 0x400838, 0x2f8684);
- nv_wr32(dev, 0x40083c, 0x115f3f);
- nv_wr32(dev, 0x004006b0, 0x40000020);
- } else
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ struct nv10_graph_priv *priv = (void *)object->engine;
+ struct nv10_graph_chan *chan = (void *)object;
+ unsigned long flags;
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv10_graph_set_tile_region(dev, i);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+ spin_lock_irqsave(&priv->lock, flags);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (nv10_graph_channel(priv) == chan)
+ nv10_graph_unload_context(chan);
+ nv_mask(priv, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ return nouveau_object_fini(&chan->base, suspend);
}
-static int
-nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv10_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
-
-static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- struct pipe_state *pipe = &ctx->pipe_state;
- uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
- uint32_t xfmode0, xfmode1;
- int i;
-
- ctx->lma_window[(mthd - 0x1638) / 4] = data;
-
- if (mthd != 0x1644)
- return 0;
-
- nouveau_wait_for_idle(dev);
-
- PIPE_SAVE(dev, pipe_0x0040, 0x0040);
- PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-
- PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
-
- nouveau_wait_for_idle(dev);
-
- xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-
- PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
- PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
- PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
-
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
- PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-
- nouveau_wait_for_idle(dev);
-
- PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
-
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-
- PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
- PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
- PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
- PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nouveau_wait_for_idle(dev);
+static struct nouveau_oclass
+nv10_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_graph_context_ctor,
+ .dtor = nv10_graph_context_dtor,
+ .init = nouveau_object_init,
+ .fini = nv10_graph_context_fini,
+ },
+};
- return 0;
-}
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
-static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
+static void
+nv10_graph_tile_prog(struct nouveau_engine *engine, int i)
{
- struct drm_device *dev = chan->dev;
+ struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+ struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+ struct nv10_graph_priv *priv = (void *)engine;
+ unsigned long flags;
- nouveau_wait_for_idle(dev);
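+ /* pause PFIFO and wait for PGRAPH to idle before reprogramming this tile region */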
+ pfifo->pause(pfifo, &flags);
+ nv04_graph_idle(priv);
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
- nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
- nv_wr32(dev, 0x004006b0,
- nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+ nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr);
- return 0;
+ pfifo->start(pfifo, &flags);
}
-struct nouveau_bitfield nv10_graph_intr[] = {
+struct nouveau_bitfield nv10_graph_intr_name[] = {
{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
{ NV_PGRAPH_INTR_ERROR, "ERROR" },
{}
@@ -1073,115 +1150,165 @@ struct nouveau_bitfield nv10_graph_nstatus[] = {
};
static void
-nv10_graph_isr(struct drm_device *dev)
+nv10_graph_intr(struct nouveau_subdev *subdev)
{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x01f00000) >> 20;
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- }
- }
+ struct nv10_graph_priv *priv = (void *)subdev;
+ struct nv10_graph_chan *chan = NULL;
+ struct nouveau_namedb *namedb = NULL;
+ struct nouveau_handle *handle = NULL;
+ u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+ u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+ unsigned long flags;
- if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv10_graph_context_switch(dev);
+ spin_lock_irqsave(&priv->lock, flags);
+ chan = priv->chan[chid];
+ if (chan)
+ namedb = (void *)nv_pclass(nv_object(chan), NV_NAMEDB_CLASS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
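+ /* ILLEGAL_MTHD may be a software method; let the object's handler consume it before it is reported as an error */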
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (chan && (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) {
+ handle = nouveau_namedb_get_class(namedb, class);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
}
+ }
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(priv, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv10_graph_context_switch(priv);
+ }
+
+ nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+ nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show) {
+ nv_error(priv, "");
+ nouveau_bitfield_print(nv10_graph_intr_name, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ nv_error(priv, "ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
}
+
+ nouveau_namedb_put(handle);
}
-static void
-nv10_graph_destroy(struct drm_device *dev, int engine)
+static int
+nv10_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
+ struct nv10_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv10_graph_intr;
+ nv_engine(priv)->cclass = &nv10_graph_cclass;
+
+ if (nv_device(priv)->chipset <= 0x10)
+ nv_engine(priv)->sclass = nv10_graph_sclass;
+ else
+ if (nv_device(priv)->chipset < 0x17 ||
+ nv_device(priv)->chipset == 0x1a)
+ nv_engine(priv)->sclass = nv15_graph_sclass;
+ else
+ nv_engine(priv)->sclass = nv17_graph_sclass;
+
+ nv_engine(priv)->tile_prog = nv10_graph_tile_prog;
+ spin_lock_init(&priv->lock);
+ return 0;
+}
- nouveau_irq_unregister(dev, 12);
- kfree(pgraph);
+static void
+nv10_graph_dtor(struct nouveau_object *object)
+{
+ struct nv10_graph_priv *priv = (void *)object;
+ nouveau_graph_destroy(&priv->base);
}
-int
-nv10_graph_create(struct drm_device *dev)
+static int
+nv10_graph_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv10_graph_destroy;
- pgraph->base.init = nv10_graph_init;
- pgraph->base.fini = nv10_graph_fini;
- pgraph->base.context_new = nv10_graph_context_new;
- pgraph->base.context_del = nv10_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
- pgraph->base.set_tile_region = nv10_graph_set_tile_region;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv10_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
- NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
- NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
- NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
-
- /* celcius */
- if (dev_priv->chipset <= 0x10) {
- NVOBJ_CLASS(dev, 0x0056, GR);
- } else
- if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
- NVOBJ_CLASS(dev, 0x0096, GR);
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nv10_graph_priv *priv = (void *)engine;
+ int ret, i;
+
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ /* nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0x55DE0830 | (1 << 29) | (1 << 31));
+
+ if (nv_device(priv)->chipset >= 0x17) {
+ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(priv, 0x400a10, 0x03ff3fb6);
+ nv_wr32(priv, 0x400838, 0x002f8684);
+ nv_wr32(priv, 0x40083c, 0x00115f3f);
+ nv_wr32(priv, 0x4006b0, 0x40000020);
} else {
- NVOBJ_CLASS(dev, 0x0099, GR);
- NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
}
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->tile.regions; i++)
+ engine->tile_prog(engine, i);
+
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+ nv_wr32(priv, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+
+ nv_mask(priv, NV10_PGRAPH_CTX_USER, 0xff000000, 0x1f000000);
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(priv, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
return 0;
}
+
+static int
+nv10_graph_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv10_graph_priv *priv = (void *)object;
+ return nouveau_graph_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv10_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_graph_ctor,
+ .dtor = nv10_graph_dtor,
+ .init = nv10_graph_init,
+ .fini = nv10_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 0d874b8b18e5..61faef976aee 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,836 +1,378 @@
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <nouveau_drm.h>
-
-/*
- * NV20
- * -----
- * There are 3 families :
- * NV20 is 0x10de:0x020*
- * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
- * NV2A is 0x10de:0x02A0
- *
- * NV30
- * -----
- * There are 3 families :
- * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
- * NV34 is 0x10de:0x032*
- * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
- *
- * Not seen in the wild, no dumps (probably NV35) :
- * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
- * NV38 is 0x10de:0x0333, 0x10de:0x00fe
- *
- */
-
-struct nv20_graph_engine {
- struct nouveau_exec_engine base;
- struct nouveau_gpuobj *ctxtab;
- void (*grctx_init)(struct nouveau_gpuobj *);
- u32 grctx_size;
- u32 grctx_user;
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv20_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+ { 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+ { 0x0097, &nv04_graph_ofuncs, NULL }, /* kelvin */
+ { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+ {},
};
-#define NV20_GRCTX_SIZE (3580*4)
-#define NV25_GRCTX_SIZE (3529*4)
-#define NV2A_GRCTX_SIZE (3500*4)
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
-#define NV30_31_GRCTX_SIZE (24392)
-#define NV34_GRCTX_SIZE (18140)
-#define NV35_36_GRCTX_SIZE (22396)
-
-int
-nv20_graph_unload_context(struct drm_device *dev)
+static int
+nv20_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_channel *chan;
- struct nouveau_gpuobj *grctx;
- u32 tmp;
-
- chan = nv10_graph_channel(dev);
- if (!chan)
- return 0;
- grctx = chan->engctx[NVOBJ_ENGINE_GR];
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->addr >> 4);
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
- NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
-
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
- return 0;
-}
+ struct nv20_graph_chan *chan;
+ int ret, i;
-static void
-nv20_graph_rdi(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, writecount = 32;
- uint32_t rdi_index = 0x2c80000;
-
- if (dev_priv->chipset == 0x20) {
- rdi_index = 0x3d0000;
- writecount = 15;
- }
-
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
- for (i = 0; i < writecount; i++)
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+ 0x37f0, 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
- nouveau_wait_for_idle(dev);
-}
+ chan->chid = nouveau_fifo_chan(parent)->chid;
-static void
-nv20_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x033c, 0xffff0000);
- nv_wo32(ctx, 0x03a0, 0x0fff0000);
- nv_wo32(ctx, 0x03a4, 0x0fff0000);
- nv_wo32(ctx, 0x047c, 0x00000101);
- nv_wo32(ctx, 0x0490, 0x00000111);
- nv_wo32(ctx, 0x04a8, 0x44400000);
+ nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x033c, 0xffff0000);
+ nv_wo32(chan, 0x03a0, 0x0fff0000);
+ nv_wo32(chan, 0x03a4, 0x0fff0000);
+ nv_wo32(chan, 0x047c, 0x00000101);
+ nv_wo32(chan, 0x0490, 0x00000111);
+ nv_wo32(chan, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(ctx, i, 0x00030303);
+ nv_wo32(chan, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(ctx, i, 0x00080000);
+ nv_wo32(chan, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(ctx, i, 0x01012000);
+ nv_wo32(chan, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
+ nv_wo32(chan, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(chan, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05a4, 0x4b7fffff);
- nv_wo32(ctx, 0x05fc, 0x00000001);
- nv_wo32(ctx, 0x0604, 0x00004000);
- nv_wo32(ctx, 0x0610, 0x00000001);
- nv_wo32(ctx, 0x0618, 0x00040000);
- nv_wo32(ctx, 0x061c, 0x00010000);
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x05a4, 0x4b7fffff);
+ nv_wo32(chan, 0x05fc, 0x00000001);
+ nv_wo32(chan, 0x0604, 0x00004000);
+ nv_wo32(chan, 0x0610, 0x00000001);
+ nv_wo32(chan, 0x0618, 0x00040000);
+ nv_wo32(chan, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
+ nv_wo32(chan, (i + 0), 0x10700ff9);
+ nv_wo32(chan, (i + 4), 0x0436086c);
+ nv_wo32(chan, (i + 8), 0x000c001b);
}
- nv_wo32(ctx, 0x281c, 0x3f800000);
- nv_wo32(ctx, 0x2830, 0x3f800000);
- nv_wo32(ctx, 0x285c, 0x40000000);
- nv_wo32(ctx, 0x2860, 0x3f800000);
- nv_wo32(ctx, 0x2864, 0x3f000000);
- nv_wo32(ctx, 0x286c, 0x40000000);
- nv_wo32(ctx, 0x2870, 0x3f800000);
- nv_wo32(ctx, 0x2878, 0xbf800000);
- nv_wo32(ctx, 0x2880, 0xbf800000);
- nv_wo32(ctx, 0x34a4, 0x000fe000);
- nv_wo32(ctx, 0x3530, 0x000003f8);
- nv_wo32(ctx, 0x3540, 0x002fe000);
+ nv_wo32(chan, 0x281c, 0x3f800000);
+ nv_wo32(chan, 0x2830, 0x3f800000);
+ nv_wo32(chan, 0x285c, 0x40000000);
+ nv_wo32(chan, 0x2860, 0x3f800000);
+ nv_wo32(chan, 0x2864, 0x3f000000);
+ nv_wo32(chan, 0x286c, 0x40000000);
+ nv_wo32(chan, 0x2870, 0x3f800000);
+ nv_wo32(chan, 0x2878, 0xbf800000);
+ nv_wo32(chan, 0x2880, 0xbf800000);
+ nv_wo32(chan, 0x34a4, 0x000fe000);
+ nv_wo32(chan, 0x3530, 0x000003f8);
+ nv_wo32(chan, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
+ nv_wo32(chan, i, 0x001c527c);
+ return 0;
}
-static void
-nv25_graph_context_init(struct nouveau_gpuobj *ctx)
+int
+nv20_graph_context_init(struct nouveau_object *object)
{
- int i;
-
- nv_wo32(ctx, 0x035c, 0xffff0000);
- nv_wo32(ctx, 0x03c0, 0x0fff0000);
- nv_wo32(ctx, 0x03c4, 0x0fff0000);
- nv_wo32(ctx, 0x049c, 0x00000101);
- nv_wo32(ctx, 0x04b0, 0x00000111);
- nv_wo32(ctx, 0x04c8, 0x00000080);
- nv_wo32(ctx, 0x04cc, 0xffff0000);
- nv_wo32(ctx, 0x04d0, 0x00000001);
- nv_wo32(ctx, 0x04e4, 0x44400000);
- nv_wo32(ctx, 0x04fc, 0x4b800000);
- for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(ctx, i, 0x00080000);
- for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
- for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05e0, 0x4b7fffff);
- nv_wo32(ctx, 0x0620, 0x00000080);
- nv_wo32(ctx, 0x0624, 0x30201000);
- nv_wo32(ctx, 0x0628, 0x70605040);
- nv_wo32(ctx, 0x062c, 0xb0a09080);
- nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
- nv_wo32(ctx, 0x0664, 0x00000001);
- nv_wo32(ctx, 0x066c, 0x00004000);
- nv_wo32(ctx, 0x0678, 0x00000001);
- nv_wo32(ctx, 0x0680, 0x00040000);
- nv_wo32(ctx, 0x0684, 0x00010000);
- for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
- }
- nv_wo32(ctx, 0x2704, 0x3f800000);
- nv_wo32(ctx, 0x2718, 0x3f800000);
- nv_wo32(ctx, 0x2744, 0x40000000);
- nv_wo32(ctx, 0x2748, 0x3f800000);
- nv_wo32(ctx, 0x274c, 0x3f000000);
- nv_wo32(ctx, 0x2754, 0x40000000);
- nv_wo32(ctx, 0x2758, 0x3f800000);
- nv_wo32(ctx, 0x2760, 0xbf800000);
- nv_wo32(ctx, 0x2768, 0xbf800000);
- nv_wo32(ctx, 0x308c, 0x000fe000);
- nv_wo32(ctx, 0x3108, 0x000003f8);
- nv_wo32(ctx, 0x3468, 0x002fe000);
- for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
-}
+ struct nv20_graph_priv *priv = (void *)object->engine;
+ struct nv20_graph_chan *chan = (void *)object;
+ int ret;
-static void
-nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x033c, 0xffff0000);
- nv_wo32(ctx, 0x03a0, 0x0fff0000);
- nv_wo32(ctx, 0x03a4, 0x0fff0000);
- nv_wo32(ctx, 0x047c, 0x00000101);
- nv_wo32(ctx, 0x0490, 0x00000111);
- nv_wo32(ctx, 0x04a8, 0x44400000);
- for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(ctx, i, 0x00080000);
- for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
- for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05a4, 0x4b7fffff);
- nv_wo32(ctx, 0x05fc, 0x00000001);
- nv_wo32(ctx, 0x0604, 0x00004000);
- nv_wo32(ctx, 0x0610, 0x00000001);
- nv_wo32(ctx, 0x0618, 0x00040000);
- nv_wo32(ctx, 0x061c, 0x00010000);
- for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
- }
- nv_wo32(ctx, 0x269c, 0x3f800000);
- nv_wo32(ctx, 0x26b0, 0x3f800000);
- nv_wo32(ctx, 0x26dc, 0x40000000);
- nv_wo32(ctx, 0x26e0, 0x3f800000);
- nv_wo32(ctx, 0x26e4, 0x3f000000);
- nv_wo32(ctx, 0x26ec, 0x40000000);
- nv_wo32(ctx, 0x26f0, 0x3f800000);
- nv_wo32(ctx, 0x26f8, 0xbf800000);
- nv_wo32(ctx, 0x2700, 0xbf800000);
- nv_wo32(ctx, 0x3024, 0x000fe000);
- nv_wo32(ctx, 0x30a0, 0x000003f8);
- nv_wo32(ctx, 0x33fc, 0x002fe000);
- for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
-}
+ ret = nouveau_graph_context_init(&chan->base);
+ if (ret)
+ return ret;
-static void
-nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x0410, 0x00000101);
- nv_wo32(ctx, 0x0424, 0x00000111);
- nv_wo32(ctx, 0x0428, 0x00000060);
- nv_wo32(ctx, 0x0444, 0x00000080);
- nv_wo32(ctx, 0x0448, 0xffff0000);
- nv_wo32(ctx, 0x044c, 0x00000001);
- nv_wo32(ctx, 0x0460, 0x44400000);
- nv_wo32(ctx, 0x048c, 0xffff0000);
- for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04ec, 0x00011100);
- for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x0550, 0x4b7fffff);
- nv_wo32(ctx, 0x058c, 0x00000080);
- nv_wo32(ctx, 0x0590, 0x30201000);
- nv_wo32(ctx, 0x0594, 0x70605040);
- nv_wo32(ctx, 0x0598, 0xb8a89888);
- nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05b0, 0xb0000000);
- for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x085c, 0x00040000);
- nv_wo32(ctx, 0x0860, 0x00010000);
- for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 1, 0x0436086c);
- nv_wo32(ctx, i + 2, 0x000c001b);
- }
- for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x344c, 0x3f800000);
- nv_wo32(ctx, 0x3808, 0x3f800000);
- nv_wo32(ctx, 0x381c, 0x3f800000);
- nv_wo32(ctx, 0x3848, 0x40000000);
- nv_wo32(ctx, 0x384c, 0x3f800000);
- nv_wo32(ctx, 0x3850, 0x3f000000);
- nv_wo32(ctx, 0x3858, 0x40000000);
- nv_wo32(ctx, 0x385c, 0x3f800000);
- nv_wo32(ctx, 0x3864, 0xbf800000);
- nv_wo32(ctx, 0x386c, 0xbf800000);
+ nv_wo32(priv->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
+ return 0;
}
-static void
-nv34_graph_context_init(struct nouveau_gpuobj *ctx)
+int
+nv20_graph_context_fini(struct nouveau_object *object, bool suspend)
{
- int i;
-
- nv_wo32(ctx, 0x040c, 0x01000101);
- nv_wo32(ctx, 0x0420, 0x00000111);
- nv_wo32(ctx, 0x0424, 0x00000060);
- nv_wo32(ctx, 0x0440, 0x00000080);
- nv_wo32(ctx, 0x0444, 0xffff0000);
- nv_wo32(ctx, 0x0448, 0x00000001);
- nv_wo32(ctx, 0x045c, 0x44400000);
- nv_wo32(ctx, 0x0480, 0xffff0000);
- for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04e0, 0x00011100);
- for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x0544, 0x4b7fffff);
- nv_wo32(ctx, 0x057c, 0x00000080);
- nv_wo32(ctx, 0x0580, 0x30201000);
- nv_wo32(ctx, 0x0584, 0x70605040);
- nv_wo32(ctx, 0x0588, 0xb8a89888);
- nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05a0, 0xb0000000);
- for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x0850, 0x00040000);
- nv_wo32(ctx, 0x0854, 0x00010000);
- for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 1, 0x0436086c);
- nv_wo32(ctx, i + 2, 0x000c001b);
+ struct nv20_graph_priv *priv = (void *)object->engine;
+ struct nv20_graph_chan *chan = (void *)object;
+ int chid = -1;
+
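+ /* if this channel currently owns PGRAPH, save its context back into the grctx object and park PGRAPH on channel 31, as the old nv20_graph_unload_context() did */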
+ nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+ if (nv_rd32(priv, 0x400144) & 0x00010000)
+ chid = (nv_rd32(priv, 0x400148) & 0x1f000000) >> 24;
+ if (chan->chid == chid) {
+ nv_wr32(priv, 0x400784, nv_gpuobj(chan)->addr >> 4);
+ nv_wr32(priv, 0x400788, 0x00000002);
+ nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+ nv_wr32(priv, 0x400144, 0x10000000);
+ nv_mask(priv, 0x400148, 0xff000000, 0x1f000000);
}
- for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x2ae0, 0x3f800000);
- nv_wo32(ctx, 0x2e9c, 0x3f800000);
- nv_wo32(ctx, 0x2eb0, 0x3f800000);
- nv_wo32(ctx, 0x2edc, 0x40000000);
- nv_wo32(ctx, 0x2ee0, 0x3f800000);
- nv_wo32(ctx, 0x2ee4, 0x3f000000);
- nv_wo32(ctx, 0x2eec, 0x40000000);
- nv_wo32(ctx, 0x2ef0, 0x3f800000);
- nv_wo32(ctx, 0x2ef8, 0xbf800000);
- nv_wo32(ctx, 0x2f00, 0xbf800000);
-}
+ nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
-static void
-nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x040c, 0x00000101);
- nv_wo32(ctx, 0x0420, 0x00000111);
- nv_wo32(ctx, 0x0424, 0x00000060);
- nv_wo32(ctx, 0x0440, 0x00000080);
- nv_wo32(ctx, 0x0444, 0xffff0000);
- nv_wo32(ctx, 0x0448, 0x00000001);
- nv_wo32(ctx, 0x045c, 0x44400000);
- nv_wo32(ctx, 0x0488, 0xffff0000);
- for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04e8, 0x00011100);
- for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x054c, 0x4b7fffff);
- nv_wo32(ctx, 0x0588, 0x00000080);
- nv_wo32(ctx, 0x058c, 0x30201000);
- nv_wo32(ctx, 0x0590, 0x70605040);
- nv_wo32(ctx, 0x0594, 0xb8a89888);
- nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05ac, 0xb0000000);
- for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x0860, 0x00040000);
- nv_wo32(ctx, 0x0864, 0x00010000);
- for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 4, 0x0436086c);
- nv_wo32(ctx, i + 8, 0x000c001b);
- }
- for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x3450, 0x3f800000);
- nv_wo32(ctx, 0x380c, 0x3f800000);
- nv_wo32(ctx, 0x3820, 0x3f800000);
- nv_wo32(ctx, 0x384c, 0x40000000);
- nv_wo32(ctx, 0x3850, 0x3f800000);
- nv_wo32(ctx, 0x3854, 0x3f000000);
- nv_wo32(ctx, 0x385c, 0x40000000);
- nv_wo32(ctx, 0x3860, 0x3f800000);
- nv_wo32(ctx, 0x3868, 0xbf800000);
- nv_wo32(ctx, 0x3870, 0xbf800000);
+ nv_wo32(priv->ctxtab, chan->chid * 4, 0x00000000);
+ return nouveau_graph_context_fini(&chan->base, suspend);
}
-int
-nv20_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct nouveau_gpuobj *grctx = NULL;
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &grctx);
- if (ret)
- return ret;
-
- /* Initialise default context values */
- pgraph->grctx_init(grctx);
-
- /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
- /* CTX_USER */
- nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
+static struct nouveau_oclass
+nv20_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x20),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv20_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
- nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->addr >> 4);
- chan->engctx[engine] = grctx;
- return 0;
-}
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
void
-nv20_graph_context_del(struct nouveau_channel *chan, int engine)
+nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
{
- struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+ struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+ struct nv20_graph_priv *priv = (void *)engine;
unsigned long flags;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ pfifo->pause(pfifo, &flags);
+ nv04_graph_idle(priv);
- /* Unload the context if it's the currently active one */
- if (nv10_graph_channel(dev) == chan)
- nv20_graph_unload_context(dev);
+ nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
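+ /* mirror the tile setup into the RDI copies of these registers, as the old nv20_graph_set_tile_region() did */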
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->limit);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->pitch);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
- /* Free the context resources */
- nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
+ if (nv_device(engine)->card_type == NV_20) {
+ nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ }
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
+ pfifo->start(pfifo, &flags);
}
-static void
-nv20_graph_set_tile_region(struct drm_device *dev, int i)
+void
+nv20_graph_intr(struct nouveau_subdev *subdev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
-
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
-
- if (dev_priv->card_type == NV_20) {
- nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ struct nv20_graph_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+ u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xfff;
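+ /* instance address of the trapping channel's grctx, read back from the context pointer table */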
+ u32 inst = nv_ro32(priv->ctxtab, (chid * 4)) << 4;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ handle = nouveau_engctx_lookup_class(engine, inst, class);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ nouveau_engctx_handle_put(handle);
+ }
}
-}
-
-int
-nv20_graph_init(struct drm_device *dev, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t tmp, vramsz;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
-
- nv20_graph_rdi(dev);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
- nv_wr32(dev, 0x40009C , 0x00000040);
-
- if (dev_priv->chipset >= 0x25) {
- nv_wr32(dev, 0x400890, 0x00a8cfff);
- nv_wr32(dev, 0x400610, 0x304B1FB6);
- nv_wr32(dev, 0x400B80, 0x1cbd3883);
- nv_wr32(dev, 0x400B84, 0x44000000);
- nv_wr32(dev, 0x400098, 0x40000080);
- nv_wr32(dev, 0x400B88, 0x000000ff);
- } else {
- nv_wr32(dev, 0x400880, 0x0008c7df);
- nv_wr32(dev, 0x400094, 0x00000005);
- nv_wr32(dev, 0x400B80, 0x45eae20e);
- nv_wr32(dev, 0x400B84, 0x24000000);
- nv_wr32(dev, 0x400098, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+ nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show) {
+ nv_info(priv, "");
+ nouveau_bitfield_print(nv10_graph_intr_name, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
}
+}
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_tile_region(dev, i);
-
- nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
-
- tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
- nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
- tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
- nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
-
- /* begin RAM config */
- vramsz = pci_resource_len(dev->pdev, 0) - 1;
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400820, 0);
- nv_wr32(dev, 0x400824, 0);
- nv_wr32(dev, 0x400864, vramsz - 1);
- nv_wr32(dev, 0x400868, vramsz - 1);
+static int
+nv20_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
- /* interesting.. the below overwrites some of the tile setup above.. */
- nv_wr32(dev, 0x400B20, 0x00000000);
- nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
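+ /* channel context pointer table: one 32-bit grctx pointer (instance >> 4) per channel */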
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv20_graph_cclass;
+ nv_engine(priv)->sclass = nv20_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
return 0;
}
-int
-nv30_graph_init(struct drm_device *dev, int engine)
+void
+nv20_graph_dtor(struct nouveau_object *object)
{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->addr >> 4);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(dev, 0x400890, 0x01b463ff);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
- nv_wr32(dev, 0x400B80, 0x1003d888);
- nv_wr32(dev, 0x400B84, 0x0c000000);
- nv_wr32(dev, 0x400098, 0x00000000);
- nv_wr32(dev, 0x40009C, 0x0005ad00);
- nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
- nv_wr32(dev, 0x4000a0, 0x00000000);
- nv_wr32(dev, 0x4000a4, 0x00000008);
- nv_wr32(dev, 0x4008a8, 0xb784a400);
- nv_wr32(dev, 0x400ba0, 0x002f8685);
- nv_wr32(dev, 0x400ba4, 0x00231f3f);
- nv_wr32(dev, 0x4008a4, 0x40000020);
-
- if (dev_priv->chipset == 0x34) {
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
- }
+ struct nv20_graph_priv *priv = (void *)object;
+ nouveau_gpuobj_ref(NULL, &priv->ctxtab);
+ nouveau_graph_destroy(&priv->base);
+}
- nv_wr32(dev, 0x4000c0, 0x00000016);
+int
+nv20_graph_init(struct nouveau_object *object)
+{
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv20_graph_priv *priv = (void *)engine;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ u32 tmp, vramsz;
+ int ret, i;
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_tile_region(dev, i);
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(dev, 0x0040075c , 0x00000001);
+ nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
- /* begin RAM config */
- /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- if (dev_priv->chipset != 0x34) {
- nv_wr32(dev, 0x400750, 0x00EA0000);
- nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400750, 0x00EA0004);
- nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
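+ /* clear PGRAPH RDI state, replacing the old nv20_graph_rdi() helper */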
+ if (nv_device(priv)->chipset == 0x20) {
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
+ for (i = 0; i < 15; i++)
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+ nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
+ } else {
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
+ for (i = 0; i < 32; i++)
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, 0x00000000);
+ nv_wait(priv, 0x400700, 0xffffffff, 0x00000000);
}
- return 0;
-}
+ nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-int
-nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv20_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nv_wr32(priv, 0x40009C , 0x00000040);
-static void
-nv20_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x01f00000) >> 20;
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- }
- }
+ if (nv_device(priv)->chipset >= 0x25) {
+ nv_wr32(priv, 0x400890, 0x00a8cfff);
+ nv_wr32(priv, 0x400610, 0x304B1FB6);
+ nv_wr32(priv, 0x400B80, 0x1cbd3883);
+ nv_wr32(priv, 0x400B84, 0x44000000);
+ nv_wr32(priv, 0x400098, 0x40000080);
+ nv_wr32(priv, 0x400B88, 0x000000ff);
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
+ } else {
+ nv_wr32(priv, 0x400880, 0x0008c7df);
+ nv_wr32(priv, 0x400094, 0x00000005);
+ nv_wr32(priv, 0x400B80, 0x45eae20e);
+ nv_wr32(priv, 0x400B84, 0x24000000);
+ nv_wr32(priv, 0x400098, 0x00000040);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000030);
}
-}
-
-static void
-nv20_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
- nouveau_irq_unregister(dev, 12);
- nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->tile.regions; i++)
+ engine->tile_prog(engine, i);
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
-}
+ nv_wr32(priv, 0x4009a0, nv_rd32(priv, 0x100324));
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA, nv_rd32(priv, 0x100324));
-int
-nv20_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv20_graph_engine *pgraph;
- int ret;
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv20_graph_destroy;
- pgraph->base.fini = nv20_graph_fini;
- pgraph->base.context_new = nv20_graph_context_new;
- pgraph->base.context_del = nv20_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
- pgraph->base.set_tile_region = nv20_graph_set_tile_region;
-
- pgraph->grctx_user = 0x0028;
- if (dev_priv->card_type == NV_20) {
- pgraph->base.init = nv20_graph_init;
- switch (dev_priv->chipset) {
- case 0x20:
- pgraph->grctx_init = nv20_graph_context_init;
- pgraph->grctx_size = NV20_GRCTX_SIZE;
- pgraph->grctx_user = 0x0000;
- break;
- case 0x25:
- case 0x28:
- pgraph->grctx_init = nv25_graph_context_init;
- pgraph->grctx_size = NV25_GRCTX_SIZE;
- break;
- case 0x2a:
- pgraph->grctx_init = nv2a_graph_context_init;
- pgraph->grctx_size = NV2A_GRCTX_SIZE;
- pgraph->grctx_user = 0x0000;
- break;
- default:
- NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
- return 0;
- }
- } else {
- pgraph->base.init = nv30_graph_init;
- switch (dev_priv->chipset) {
- case 0x30:
- case 0x31:
- pgraph->grctx_init = nv30_31_graph_context_init;
- pgraph->grctx_size = NV30_31_GRCTX_SIZE;
- break;
- case 0x34:
- pgraph->grctx_init = nv34_graph_context_init;
- pgraph->grctx_size = NV34_GRCTX_SIZE;
- break;
- case 0x35:
- case 0x36:
- pgraph->grctx_init = nv35_36_graph_context_init;
- pgraph->grctx_size = NV35_36_GRCTX_SIZE;
- break;
- default:
- NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
- return 0;
- }
- }
+ tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nv_rd32(priv, NV10_PGRAPH_SURFACE) | 0x00020100;
+ nv_wr32(priv, NV10_PGRAPH_SURFACE, tmp);
- /* Create Context Pointer Table */
- ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctxtab);
- if (ret) {
- kfree(pgraph);
- return ret;
- }
+ /* begin RAM config */
+ vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
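+ /* 0x100200/0x100204 are the PFB config registers the old code read as NV04_PFB_CFG0/CFG1 */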
+ nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100200));
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , nv_rd32(priv, 0x100204));
+ nv_wr32(priv, 0x400820, 0);
+ nv_wr32(priv, 0x400824, 0);
+ nv_wr32(priv, 0x400864, vramsz - 1);
+ nv_wr32(priv, 0x400868, vramsz - 1);
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv20_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- if (dev_priv->card_type == NV_20) {
- NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
- NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
-
- /* kelvin */
- if (dev_priv->chipset < 0x25)
- NVOBJ_CLASS(dev, 0x0097, GR);
- else
- NVOBJ_CLASS(dev, 0x0597, GR);
- } else {
- NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
- NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
- NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
- NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
-
- /* rankine */
- if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0397, GR);
- else
- if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0697, GR);
- else
- if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0497, GR);
- }
+ /* interesting.. the below overwrites some of the tile setup above.. */
+ nv_wr32(priv, 0x400B20, 0x00000000);
+ nv_wr32(priv, 0x400B04, 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ nv_wr32(priv, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
return 0;
}
+
+struct nouveau_oclass
+nv20_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x20),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv20_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv20_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
new file mode 100644
index 000000000000..2bea7313e03f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.h
@@ -0,0 +1,31 @@
+#ifndef __NV20_GRAPH_H__
+#define __NV20_GRAPH_H__
+
+#include <core/enum.h>
+
+#include <engine/graph.h>
+#include <engine/fifo.h>
+
+struct nv20_graph_priv {
+ struct nouveau_graph base;
+ struct nouveau_gpuobj *ctxtab;
+};
+
+struct nv20_graph_chan {
+ struct nouveau_graph_chan base;
+ int chid;
+};
+
+extern struct nouveau_oclass nv25_graph_sclass[];
+int nv20_graph_context_init(struct nouveau_object *);
+int nv20_graph_context_fini(struct nouveau_object *, bool);
+
+void nv20_graph_tile_prog(struct nouveau_engine *, int);
+void nv20_graph_intr(struct nouveau_subdev *);
+
+void nv20_graph_dtor(struct nouveau_object *);
+int nv20_graph_init(struct nouveau_object *);
+
+int nv30_graph_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
new file mode 100644
index 000000000000..b2b650dd8b28
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv25.c
@@ -0,0 +1,167 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+struct nouveau_oclass
+nv25_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+ { 0x0096, &nv04_graph_ofuncs, NULL }, /* celsius */
+ { 0x009e, &nv04_graph_ofuncs, NULL }, /* swzsurf */
+ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+ { 0x0597, &nv04_graph_ofuncs, NULL }, /* kelvin */
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv25_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_chan *chan;
+ int ret, i;
+
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x3724,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->chid = nouveau_fifo_chan(parent)->chid;
+
+ nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x035c, 0xffff0000);
+ nv_wo32(chan, 0x03c0, 0x0fff0000);
+ nv_wo32(chan, 0x03c4, 0x0fff0000);
+ nv_wo32(chan, 0x049c, 0x00000101);
+ nv_wo32(chan, 0x04b0, 0x00000111);
+ nv_wo32(chan, 0x04c8, 0x00000080);
+ nv_wo32(chan, 0x04cc, 0xffff0000);
+ nv_wo32(chan, 0x04d0, 0x00000001);
+ nv_wo32(chan, 0x04e4, 0x44400000);
+ nv_wo32(chan, 0x04fc, 0x4b800000);
+ for (i = 0x0510; i <= 0x051c; i += 4)
+ nv_wo32(chan, i, 0x00030303);
+ for (i = 0x0530; i <= 0x053c; i += 4)
+ nv_wo32(chan, i, 0x00080000);
+ for (i = 0x0548; i <= 0x0554; i += 4)
+ nv_wo32(chan, i, 0x01012000);
+ for (i = 0x0558; i <= 0x0564; i += 4)
+ nv_wo32(chan, i, 0x000105b8);
+ for (i = 0x0568; i <= 0x0574; i += 4)
+ nv_wo32(chan, i, 0x00080008);
+ for (i = 0x0598; i <= 0x05d4; i += 4)
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x05e0, 0x4b7fffff);
+ nv_wo32(chan, 0x0620, 0x00000080);
+ nv_wo32(chan, 0x0624, 0x30201000);
+ nv_wo32(chan, 0x0628, 0x70605040);
+ nv_wo32(chan, 0x062c, 0xb0a09080);
+ nv_wo32(chan, 0x0630, 0xf0e0d0c0);
+ nv_wo32(chan, 0x0664, 0x00000001);
+ nv_wo32(chan, 0x066c, 0x00004000);
+ nv_wo32(chan, 0x0678, 0x00000001);
+ nv_wo32(chan, 0x0680, 0x00040000);
+ nv_wo32(chan, 0x0684, 0x00010000);
+ for (i = 0x1b04; i <= 0x2374; i += 16) {
+ nv_wo32(chan, (i + 0), 0x10700ff9);
+ nv_wo32(chan, (i + 4), 0x0436086c);
+ nv_wo32(chan, (i + 8), 0x000c001b);
+ }
+ nv_wo32(chan, 0x2704, 0x3f800000);
+ nv_wo32(chan, 0x2718, 0x3f800000);
+ nv_wo32(chan, 0x2744, 0x40000000);
+ nv_wo32(chan, 0x2748, 0x3f800000);
+ nv_wo32(chan, 0x274c, 0x3f000000);
+ nv_wo32(chan, 0x2754, 0x40000000);
+ nv_wo32(chan, 0x2758, 0x3f800000);
+ nv_wo32(chan, 0x2760, 0xbf800000);
+ nv_wo32(chan, 0x2768, 0xbf800000);
+ nv_wo32(chan, 0x308c, 0x000fe000);
+ nv_wo32(chan, 0x3108, 0x000003f8);
+ nv_wo32(chan, 0x3468, 0x002fe000);
+ for (i = 0x3484; i <= 0x34a0; i += 4)
+ nv_wo32(chan, i, 0x001c527c);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv25_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv25_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv25_graph_cclass;
+ nv_engine(priv)->sclass = nv25_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv25_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x25),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv25_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv20_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
new file mode 100644
index 000000000000..700462fa0ae0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv2a.c
@@ -0,0 +1,134 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv2a_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_chan *chan;
+ int ret, i;
+
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x36b0,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->chid = nouveau_fifo_chan(parent)->chid;
+
+ nv_wo32(chan, 0x0000, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x033c, 0xffff0000);
+ nv_wo32(chan, 0x03a0, 0x0fff0000);
+ nv_wo32(chan, 0x03a4, 0x0fff0000);
+ nv_wo32(chan, 0x047c, 0x00000101);
+ nv_wo32(chan, 0x0490, 0x00000111);
+ nv_wo32(chan, 0x04a8, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(chan, i, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(chan, i, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(chan, i, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(chan, i, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(chan, i, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x05a4, 0x4b7fffff);
+ nv_wo32(chan, 0x05fc, 0x00000001);
+ nv_wo32(chan, 0x0604, 0x00004000);
+ nv_wo32(chan, 0x0610, 0x00000001);
+ nv_wo32(chan, 0x0618, 0x00040000);
+ nv_wo32(chan, 0x061c, 0x00010000);
+ for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+ nv_wo32(chan, (i + 0), 0x10700ff9);
+ nv_wo32(chan, (i + 4), 0x0436086c);
+ nv_wo32(chan, (i + 8), 0x000c001b);
+ }
+ nv_wo32(chan, 0x269c, 0x3f800000);
+ nv_wo32(chan, 0x26b0, 0x3f800000);
+ nv_wo32(chan, 0x26dc, 0x40000000);
+ nv_wo32(chan, 0x26e0, 0x3f800000);
+ nv_wo32(chan, 0x26e4, 0x3f000000);
+ nv_wo32(chan, 0x26ec, 0x40000000);
+ nv_wo32(chan, 0x26f0, 0x3f800000);
+ nv_wo32(chan, 0x26f8, 0xbf800000);
+ nv_wo32(chan, 0x2700, 0xbf800000);
+ nv_wo32(chan, 0x3024, 0x000fe000);
+ nv_wo32(chan, 0x30a0, 0x000003f8);
+ nv_wo32(chan, 0x33fc, 0x002fe000);
+ for (i = 0x341c; i <= 0x3438; i += 4)
+ nv_wo32(chan, i, 0x001c527c);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv2a_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x2a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv2a_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv2a_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv2a_graph_cclass;
+ nv_engine(priv)->sclass = nv25_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv2a_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x2a),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv2a_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv20_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
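
The context constructor above seeds the per-channel PGRAPH context image with nv_wo32() stores: offsets are in bytes and each store covers one 32-bit word, so runs of identical words advance by 4 and the three-word records advance by 16 with members at +0/+4/+8. A self-contained model of that offset arithmetic, using a made-up ctx_wr32() in place of nv_wo32(), might look like this:

/* Hypothetical model of filling a context image the way the ctor above does.
 * ctx_wr32() stands in for nv_wo32(): 'offset' is in bytes, each write
 * covers one 32-bit word. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CTX_SIZE 0x36b0                       /* nv2a context size used above */

static uint32_t ctx[CTX_SIZE / 4];

static void ctx_wr32(uint32_t offset, uint32_t value)
{
        ctx[offset / 4] = value;              /* byte offset -> word index */
}

int main(void)
{
        uint32_t i, chid = 3;

        memset(ctx, 0, sizeof(ctx));          /* NVOBJ_FLAG_ZERO_ALLOC analogue */
        ctx_wr32(0x0000, 0x00000001 | (chid << 24));

        /* a run of identical words: one word every 4 bytes */
        for (i = 0x04d4; i <= 0x04e0; i += 4)
                ctx_wr32(i, 0x00030303);

        /* a run of 16-byte records: three words per record at +0/+4/+8 */
        for (i = 0x1a9c; i <= 0x22fc; i += 16) {
                ctx_wr32(i + 0, 0x10700ff9);
                ctx_wr32(i + 4, 0x0436086c);
                ctx_wr32(i + 8, 0x000c001b);
        }

        printf("word at 0x1a9c+4: 0x%08x\n", ctx[(0x1a9c + 4) / 4]);
        return 0;
}
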
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
new file mode 100644
index 000000000000..cedadaa92d3f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv30.c
@@ -0,0 +1,238 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv30_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0397, &nv04_graph_ofuncs, NULL }, /* rankine */
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv30_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_chan *chan;
+ int ret, i;
+
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x5f48,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->chid = nouveau_fifo_chan(parent)->chid;
+
+ nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x0410, 0x00000101);
+ nv_wo32(chan, 0x0424, 0x00000111);
+ nv_wo32(chan, 0x0428, 0x00000060);
+ nv_wo32(chan, 0x0444, 0x00000080);
+ nv_wo32(chan, 0x0448, 0xffff0000);
+ nv_wo32(chan, 0x044c, 0x00000001);
+ nv_wo32(chan, 0x0460, 0x44400000);
+ nv_wo32(chan, 0x048c, 0xffff0000);
+ for (i = 0x04e0; i < 0x04e8; i += 4)
+ nv_wo32(chan, i, 0x0fff0000);
+ nv_wo32(chan, 0x04ec, 0x00011100);
+ for (i = 0x0508; i < 0x0548; i += 4)
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x0550, 0x4b7fffff);
+ nv_wo32(chan, 0x058c, 0x00000080);
+ nv_wo32(chan, 0x0590, 0x30201000);
+ nv_wo32(chan, 0x0594, 0x70605040);
+ nv_wo32(chan, 0x0598, 0xb8a89888);
+ nv_wo32(chan, 0x059c, 0xf8e8d8c8);
+ nv_wo32(chan, 0x05b0, 0xb0000000);
+ for (i = 0x0600; i < 0x0640; i += 4)
+ nv_wo32(chan, i, 0x00010588);
+ for (i = 0x0640; i < 0x0680; i += 4)
+ nv_wo32(chan, i, 0x00030303);
+ for (i = 0x06c0; i < 0x0700; i += 4)
+ nv_wo32(chan, i, 0x0008aae4);
+ for (i = 0x0700; i < 0x0740; i += 4)
+ nv_wo32(chan, i, 0x01012000);
+ for (i = 0x0740; i < 0x0780; i += 4)
+ nv_wo32(chan, i, 0x00080008);
+ nv_wo32(chan, 0x085c, 0x00040000);
+ nv_wo32(chan, 0x0860, 0x00010000);
+ for (i = 0x0864; i < 0x0874; i += 4)
+ nv_wo32(chan, i, 0x00040004);
+ for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ nv_wo32(chan, i + 0, 0x10700ff9);
+ nv_wo32(chan, i + 4, 0x0436086c);
+ nv_wo32(chan, i + 8, 0x000c001b);
+ }
+ for (i = 0x30b8; i < 0x30c8; i += 4)
+ nv_wo32(chan, i, 0x0000ffff);
+ nv_wo32(chan, 0x344c, 0x3f800000);
+ nv_wo32(chan, 0x3808, 0x3f800000);
+ nv_wo32(chan, 0x381c, 0x3f800000);
+ nv_wo32(chan, 0x3848, 0x40000000);
+ nv_wo32(chan, 0x384c, 0x3f800000);
+ nv_wo32(chan, 0x3850, 0x3f000000);
+ nv_wo32(chan, 0x3858, 0x40000000);
+ nv_wo32(chan, 0x385c, 0x3f800000);
+ nv_wo32(chan, 0x3864, 0xbf800000);
+ nv_wo32(chan, 0x386c, 0xbf800000);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv30_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x30),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv30_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv30_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv30_graph_cclass;
+ nv_engine(priv)->sclass = nv30_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ return 0;
+}
+
+int
+nv30_graph_init(struct nouveau_object *object)
+{
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv20_graph_priv *priv = (void *)engine;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ int ret, i;
+
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, NV20_PGRAPH_CHANNEL_CTX_TABLE, priv->ctxtab->addr >> 4);
+
+ nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(priv, 0x400890, 0x01b463ff);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ nv_wr32(priv, 0x400B80, 0x1003d888);
+ nv_wr32(priv, 0x400B84, 0x0c000000);
+ nv_wr32(priv, 0x400098, 0x00000000);
+ nv_wr32(priv, 0x40009C, 0x0005ad00);
+ nv_wr32(priv, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+ nv_wr32(priv, 0x4000a0, 0x00000000);
+ nv_wr32(priv, 0x4000a4, 0x00000008);
+ nv_wr32(priv, 0x4008a8, 0xb784a400);
+ nv_wr32(priv, 0x400ba0, 0x002f8685);
+ nv_wr32(priv, 0x400ba4, 0x00231f3f);
+ nv_wr32(priv, 0x4008a4, 0x40000020);
+
+ if (nv_device(priv)->chipset == 0x34) {
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00200201);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000008);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000032);
+ nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ nv_wr32(priv, NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
+ nv_wr32(priv, 0x4000c0, 0x00000016);
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->tile.regions; i++)
+ engine->tile_prog(engine, i);
+
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(priv, 0x0040075c , 0x00000001);
+
+ /* begin RAM config */
+ /* vramsz = pci_resource_len(priv->dev->pdev, 0) - 1; */
+ nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+ if (nv_device(priv)->chipset != 0x34) {
+ nv_wr32(priv, 0x400750, 0x00EA0000);
+ nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x400750, 0x00EA0004);
+ nv_wr32(priv, 0x400754, nv_rd32(priv, 0x100204));
+ }
+ return 0;
+}
+
+struct nouveau_oclass
+nv30_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x30),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv30_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv30_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
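
Each of these ctors also allocates a 32 * 4 byte "ctxtab" object, and nv30_graph_init() above points NV20_PGRAPH_CHANNEL_CTX_TABLE at it (address >> 4); the 32 word-sized entries presumably hold one context pointer per FIFO channel, likewise stored as address >> 4. The sketch below only models that bookkeeping in plain C; ctxtab_bind()/ctxtab_lookup() and the channel count are illustrative assumptions, not driver functions.

/* Hypothetical model of the per-channel context table set up above. */
#include <stdint.h>
#include <stdio.h>

#define CHANNELS 32

static uint32_t ctxtab[CHANNELS];                   /* the 32 * 4 byte table */

static void ctxtab_bind(int chid, uint64_t ctx_addr)
{
        ctxtab[chid] = (uint32_t)(ctx_addr >> 4);   /* 16-byte aligned handle */
}

static uint64_t ctxtab_lookup(int chid)
{
        return (uint64_t)ctxtab[chid] << 4;         /* back to a byte address */
}

int main(void)
{
        ctxtab_bind(5, 0x00123450);                 /* channel 5's ctx image */
        printf("ch5 ctx at 0x%08llx\n",
               (unsigned long long)ctxtab_lookup(5));
        return 0;
}
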
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
new file mode 100644
index 000000000000..273f6320027b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv34.c
@@ -0,0 +1,168 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include <engine/graph.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv34_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0697, &nv04_graph_ofuncs, NULL }, /* rankine */
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv34_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_chan *chan;
+ int ret, i;
+
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x46dc,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->chid = nouveau_fifo_chan(parent)->chid;
+
+ nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x040c, 0x01000101);
+ nv_wo32(chan, 0x0420, 0x00000111);
+ nv_wo32(chan, 0x0424, 0x00000060);
+ nv_wo32(chan, 0x0440, 0x00000080);
+ nv_wo32(chan, 0x0444, 0xffff0000);
+ nv_wo32(chan, 0x0448, 0x00000001);
+ nv_wo32(chan, 0x045c, 0x44400000);
+ nv_wo32(chan, 0x0480, 0xffff0000);
+ for (i = 0x04d4; i < 0x04dc; i += 4)
+ nv_wo32(chan, i, 0x0fff0000);
+ nv_wo32(chan, 0x04e0, 0x00011100);
+ for (i = 0x04fc; i < 0x053c; i += 4)
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x0544, 0x4b7fffff);
+ nv_wo32(chan, 0x057c, 0x00000080);
+ nv_wo32(chan, 0x0580, 0x30201000);
+ nv_wo32(chan, 0x0584, 0x70605040);
+ nv_wo32(chan, 0x0588, 0xb8a89888);
+ nv_wo32(chan, 0x058c, 0xf8e8d8c8);
+ nv_wo32(chan, 0x05a0, 0xb0000000);
+ for (i = 0x05f0; i < 0x0630; i += 4)
+ nv_wo32(chan, i, 0x00010588);
+ for (i = 0x0630; i < 0x0670; i += 4)
+ nv_wo32(chan, i, 0x00030303);
+ for (i = 0x06b0; i < 0x06f0; i += 4)
+ nv_wo32(chan, i, 0x0008aae4);
+ for (i = 0x06f0; i < 0x0730; i += 4)
+ nv_wo32(chan, i, 0x01012000);
+ for (i = 0x0730; i < 0x0770; i += 4)
+ nv_wo32(chan, i, 0x00080008);
+ nv_wo32(chan, 0x0850, 0x00040000);
+ nv_wo32(chan, 0x0854, 0x00010000);
+ for (i = 0x0858; i < 0x0868; i += 4)
+ nv_wo32(chan, i, 0x00040004);
+ for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ nv_wo32(chan, i + 0, 0x10700ff9);
+ nv_wo32(chan, i + 4, 0x0436086c);
+ nv_wo32(chan, i + 8, 0x000c001b);
+ }
+ for (i = 0x274c; i < 0x275c; i += 4)
+ nv_wo32(chan, i, 0x0000ffff);
+ nv_wo32(chan, 0x2ae0, 0x3f800000);
+ nv_wo32(chan, 0x2e9c, 0x3f800000);
+ nv_wo32(chan, 0x2eb0, 0x3f800000);
+ nv_wo32(chan, 0x2edc, 0x40000000);
+ nv_wo32(chan, 0x2ee0, 0x3f800000);
+ nv_wo32(chan, 0x2ee4, 0x3f000000);
+ nv_wo32(chan, 0x2eec, 0x40000000);
+ nv_wo32(chan, 0x2ef0, 0x3f800000);
+ nv_wo32(chan, 0x2ef8, 0xbf800000);
+ nv_wo32(chan, 0x2f00, 0xbf800000);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv34_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x34),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv34_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv34_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv34_graph_cclass;
+ nv_engine(priv)->sclass = nv34_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv34_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x34),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv34_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv30_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
new file mode 100644
index 000000000000..f40ee2116ee1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv35.c
@@ -0,0 +1,166 @@
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+#include <subdev/timer.h>
+#include <subdev/fb.h>
+
+#include "nv20.h"
+#include "regs.h"
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv35_graph_sclass[] = {
+ { 0x0012, &nv04_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv04_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv04_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv04_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv04_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv04_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv04_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv04_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv04_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv04_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv04_graph_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv04_graph_ofuncs, NULL }, /* imageblit */
+ { 0x0362, &nv04_graph_ofuncs, NULL }, /* surf2d (nv30) */
+ { 0x0389, &nv04_graph_ofuncs, NULL }, /* sifm (nv30) */
+ { 0x038a, &nv04_graph_ofuncs, NULL }, /* ifc (nv30) */
+ { 0x039e, &nv04_graph_ofuncs, NULL }, /* swzsurf (nv30) */
+ { 0x0497, &nv04_graph_ofuncs, NULL }, /* rankine */
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static int
+nv35_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_chan *chan;
+ int ret, i;
+
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL, 0x577c,
+ 16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->chid = nouveau_fifo_chan(parent)->chid;
+
+ nv_wo32(chan, 0x0028, 0x00000001 | (chan->chid << 24));
+ nv_wo32(chan, 0x040c, 0x00000101);
+ nv_wo32(chan, 0x0420, 0x00000111);
+ nv_wo32(chan, 0x0424, 0x00000060);
+ nv_wo32(chan, 0x0440, 0x00000080);
+ nv_wo32(chan, 0x0444, 0xffff0000);
+ nv_wo32(chan, 0x0448, 0x00000001);
+ nv_wo32(chan, 0x045c, 0x44400000);
+ nv_wo32(chan, 0x0488, 0xffff0000);
+ for (i = 0x04dc; i < 0x04e4; i += 4)
+ nv_wo32(chan, i, 0x0fff0000);
+ nv_wo32(chan, 0x04e8, 0x00011100);
+ for (i = 0x0504; i < 0x0544; i += 4)
+ nv_wo32(chan, i, 0x07ff0000);
+ nv_wo32(chan, 0x054c, 0x4b7fffff);
+ nv_wo32(chan, 0x0588, 0x00000080);
+ nv_wo32(chan, 0x058c, 0x30201000);
+ nv_wo32(chan, 0x0590, 0x70605040);
+ nv_wo32(chan, 0x0594, 0xb8a89888);
+ nv_wo32(chan, 0x0598, 0xf8e8d8c8);
+ nv_wo32(chan, 0x05ac, 0xb0000000);
+ for (i = 0x0604; i < 0x0644; i += 4)
+ nv_wo32(chan, i, 0x00010588);
+ for (i = 0x0644; i < 0x0684; i += 4)
+ nv_wo32(chan, i, 0x00030303);
+ for (i = 0x06c4; i < 0x0704; i += 4)
+ nv_wo32(chan, i, 0x0008aae4);
+ for (i = 0x0704; i < 0x0744; i += 4)
+ nv_wo32(chan, i, 0x01012000);
+ for (i = 0x0744; i < 0x0784; i += 4)
+ nv_wo32(chan, i, 0x00080008);
+ nv_wo32(chan, 0x0860, 0x00040000);
+ nv_wo32(chan, 0x0864, 0x00010000);
+ for (i = 0x0868; i < 0x0878; i += 4)
+ nv_wo32(chan, i, 0x00040004);
+ for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+ nv_wo32(chan, i + 0, 0x10700ff9);
+ nv_wo32(chan, i + 4, 0x0436086c);
+ nv_wo32(chan, i + 8, 0x000c001b);
+ }
+ for (i = 0x30bc; i < 0x30cc; i += 4)
+ nv_wo32(chan, i, 0x0000ffff);
+ nv_wo32(chan, 0x3450, 0x3f800000);
+ nv_wo32(chan, 0x380c, 0x3f800000);
+ nv_wo32(chan, 0x3820, 0x3f800000);
+ nv_wo32(chan, 0x384c, 0x40000000);
+ nv_wo32(chan, 0x3850, 0x3f800000);
+ nv_wo32(chan, 0x3854, 0x3f000000);
+ nv_wo32(chan, 0x385c, 0x40000000);
+ nv_wo32(chan, 0x3860, 0x3f800000);
+ nv_wo32(chan, 0x3868, 0xbf800000);
+ nv_wo32(chan, 0x3870, 0xbf800000);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv35_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = nv20_graph_context_init,
+ .fini = nv20_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv35_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv20_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 32 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv20_graph_intr;
+ nv_engine(priv)->cclass = &nv35_graph_cclass;
+ nv_engine(priv)->sclass = nv35_graph_sclass;
+ nv_engine(priv)->tile_prog = nv20_graph_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv35_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x35),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv35_graph_ctor,
+ .dtor = nv20_graph_dtor,
+ .init = nv30_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
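
The nv30, nv34 and nv35 sclass tables above share all of their 2D classes and differ only in the rankine 3D class: 0x0397, 0x0697 and 0x0497 respectively. Purely as a restatement of those tables, an illustrative (non-driver) mapping could be written as:

/* Illustrative only: the 3D class exposed by each sclass table above. */
#include <stdio.h>

static unsigned rankine_class(unsigned chipset)
{
        switch (chipset) {
        case 0x30: return 0x0397;   /* nv30_graph_sclass */
        case 0x34: return 0x0697;   /* nv34_graph_sclass */
        case 0x35: return 0x0497;   /* nv35_graph_sclass */
        default:   return 0x0000;   /* not covered by these tables */
        }
}

int main(void)
{
        printf("nv34 3d class: 0x%04x\n", rankine_class(0x34));
        return 0;
}
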
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 466d21514b2c..2f9f2c69d1e3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -1,151 +1,238 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+
+#include <engine/graph.h>
#include <engine/fifo.h>
-#include <core/ramht.h>
-struct nv40_graph_engine {
- struct nouveau_exec_engine base;
- u32 grctx_size;
+#include "nv40.h"
+#include "regs.h"
+
+struct nv40_graph_priv {
+ struct nouveau_graph base;
+ u32 size;
};
+struct nv40_graph_chan {
+ struct nouveau_graph_chan base;
+};
+
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
static int
-nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+nv40_graph_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx = NULL;
- unsigned long flags;
+ struct nouveau_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &grctx);
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 20, 16, 0, &obj);
+ *pobject = nv_object(obj);
if (ret)
return ret;
- /* Initialise default context values */
- nv40_grctx_fill(dev, grctx);
- nv_wo32(grctx, 0, grctx->addr);
-
- /* init grctx pointer in ramfc, and on PFIFO if channel is
- * already active there
- */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wo32(chan->ramfc, 0x38, grctx->addr >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
- nv_wr32(dev, 0x0032e0, grctx->addr >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- chan->engctx[engine] = grctx;
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+#ifdef __BIG_ENDIAN
+ nv_mo32(obj, 0x08, 0x01000000, 0x01000000);
+#endif
+ nv_wo32(obj, 0x0c, 0x00000000);
+ nv_wo32(obj, 0x10, 0x00000000);
return 0;
}
-static void
-nv40_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 inst = 0x01000000 | (grctx->addr >> 4);
- unsigned long flags;
+struct nouveau_ofuncs
+nv40_graph_ofuncs = {
+ .ctor = nv40_graph_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
- if (nv_rd32(dev, 0x40032c) == inst)
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- if (nv_rd32(dev, 0x400330) == inst)
- nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
- nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
-}
+static struct nouveau_oclass
+nv40_graph_sclass[] = {
+ { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+ { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+ { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+ { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+ { 0x4097, &nv40_graph_ofuncs, NULL }, /* curie */
+ {},
+};
+
+static struct nouveau_oclass
+nv44_graph_sclass[] = {
+ { 0x0012, &nv40_graph_ofuncs, NULL }, /* beta1 */
+ { 0x0019, &nv40_graph_ofuncs, NULL }, /* clip */
+ { 0x0030, &nv40_graph_ofuncs, NULL }, /* null */
+ { 0x0039, &nv40_graph_ofuncs, NULL }, /* m2mf */
+ { 0x0043, &nv40_graph_ofuncs, NULL }, /* rop */
+ { 0x0044, &nv40_graph_ofuncs, NULL }, /* patt */
+ { 0x004a, &nv40_graph_ofuncs, NULL }, /* gdi */
+ { 0x0062, &nv40_graph_ofuncs, NULL }, /* surf2d */
+ { 0x0072, &nv40_graph_ofuncs, NULL }, /* beta4 */
+ { 0x0089, &nv40_graph_ofuncs, NULL }, /* sifm */
+ { 0x008a, &nv40_graph_ofuncs, NULL }, /* ifc */
+ { 0x009f, &nv40_graph_ofuncs, NULL }, /* imageblit */
+ { 0x3062, &nv40_graph_ofuncs, NULL }, /* surf2d (nv40) */
+ { 0x3089, &nv40_graph_ofuncs, NULL }, /* sifm (nv40) */
+ { 0x309e, &nv40_graph_ofuncs, NULL }, /* swzsurf (nv40) */
+ { 0x4497, &nv40_graph_ofuncs, NULL }, /* curie */
+ {},
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
-int
-nv40_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+static int
+nv40_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
+ struct nv40_graph_priv *priv = (void *)engine;
+ struct nv40_graph_chan *chan;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+ priv->size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
if (ret)
return ret;
- obj->engine = 1;
- obj->class = class;
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
-#ifndef __BIG_ENDIAN
- nv_wo32(obj, 0x08, 0x00000000);
-#else
- nv_wo32(obj, 0x08, 0x01000000);
-#endif
- nv_wo32(obj, 0x0c, 0x00000000);
- nv_wo32(obj, 0x10, 0x00000000);
+ nv40_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+ nv_wo32(chan, 0x00000, nv_gpuobj(chan)->addr >> 4);
+ return 0;
+}
+
+static int
+nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv04_graph_priv *priv = (void *)object->engine;
+ struct nv04_graph_chan *chan = (void *)object;
+ u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
+ int ret = 0;
+
+ nv_mask(priv, 0x400720, 0x00000001, 0x00000000);
+
+ if (nv_rd32(priv, 0x40032c) == inst) {
+ if (suspend) {
+ nv_wr32(priv, 0x400720, 0x00000000);
+ nv_wr32(priv, 0x400784, inst);
+ nv_mask(priv, 0x400310, 0x00000020, 0x00000020);
+ nv_mask(priv, 0x400304, 0x00000001, 0x00000001);
+ if (!nv_wait(priv, 0x400300, 0x00000001, 0x00000000)) {
+ u32 insn = nv_rd32(priv, 0x400308);
+ nv_warn(priv, "ctxprog timeout 0x%08x\n", insn);
+ ret = -EBUSY;
+ }
+ }
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
+ nv_mask(priv, 0x40032c, 0x01000000, 0x00000000);
+ }
+
+ if (nv_rd32(priv, 0x400330) == inst)
+ nv_mask(priv, 0x400330, 0x01000000, 0x00000000);
+
+ nv_mask(priv, 0x400720, 0x00000001, 0x00000001);
return ret;
}
+static struct nouveau_oclass
+nv40_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = nv40_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
static void
-nv40_graph_set_tile_region(struct drm_device *dev, int i)
+nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
+ struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+ struct nouveau_fifo *pfifo = nouveau_fifo(engine);
+ struct nv40_graph_priv *priv = (void *)engine;
+ unsigned long flags;
+
+ pfifo->pause(pfifo, &flags);
+ nv04_graph_idle(priv);
- switch (dev_priv->chipset) {
+ switch (nv_device(priv)->chipset) {
case 0x40:
case 0x41: /* guess */
case 0x42:
case 0x43:
case 0x45: /* guess */
case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
break;
case 0x44:
case 0x4a:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
break;
case 0x46:
case 0x47:
@@ -154,149 +241,213 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
case 0x4c:
case 0x67:
default:
- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
break;
}
+
+ pfifo->start(pfifo, &flags);
}
-/*
- * G70 0x47
- * G71 0x49
- * NV45 0x48
- * G72[M] 0x46
- * G73 0x4b
- * C51_G7X 0x4c
- * C51 0x4e
- */
-int
-nv40_graph_init(struct drm_device *dev, int engine)
+static void
+nv40_graph_intr(struct nouveau_subdev *subdev)
+{
+ struct nv40_graph_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u32 stat = nv_rd32(priv, NV03_PGRAPH_INTR);
+ u32 nsource = nv_rd32(priv, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(priv, NV03_PGRAPH_NSTATUS);
+ u32 inst = (nv_rd32(priv, 0x40032c) & 0x000fffff) << 4;
+ u32 addr = nv_rd32(priv, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(priv, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(priv, 0x400160 + subc * 4) & 0xffff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ handle = nouveau_engctx_lookup_class(engine, inst, class);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ nouveau_engctx_handle_put(handle);
+ }
+
+ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ nv_mask(priv, 0x402000, 0, 0);
+ }
+ }
+
+ nv_wr32(priv, NV03_PGRAPH_INTR, stat);
+ nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show) {
+ nv_info(priv, "");
+ nouveau_bitfield_print(nv10_graph_intr_name, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ nv_error(priv, "ch 0x%08x subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ }
+}
+
+static int
+nv40_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t vramsz;
- int i, j;
+ struct nv40_graph_priv *priv;
+ int ret;
+
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
+ nv_subdev(priv)->unit = 0x00001000;
+ nv_subdev(priv)->intr = nv40_graph_intr;
+ nv_engine(priv)->cclass = &nv40_graph_cclass;
+ if (nv44_graph_class(priv))
+ nv_engine(priv)->sclass = nv44_graph_sclass;
+ else
+ nv_engine(priv)->sclass = nv40_graph_sclass;
+ nv_engine(priv)->tile_prog = nv40_graph_tile_prog;
+ return 0;
+}
+
+static int
+nv40_graph_init(struct nouveau_object *object)
+{
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nv40_graph_priv *priv = (void *)engine;
+ int ret, i, j;
+ u32 vramsz;
+
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
/* generate and upload context program */
- nv40_grctx_init(dev, &pgraph->grctx_size);
+ nv40_grctx_init(nv_device(priv), &priv->size);
/* No context present currently */
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+ nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+ nv_wr32(priv, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(priv, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(priv, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ nv_wr32(priv, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(priv, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(priv, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(priv, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- j = nv_rd32(dev, 0x1540) & 0xff;
+ j = nv_rd32(priv, 0x1540) & 0xff;
if (j) {
for (i = 0; !(j & 1); j >>= 1, i++)
;
- nv_wr32(dev, 0x405000, i);
+ nv_wr32(priv, 0x405000, i);
}
- if (dev_priv->chipset == 0x40) {
- nv_wr32(dev, 0x4009b0, 0x83280fff);
- nv_wr32(dev, 0x4009b4, 0x000000a0);
+ if (nv_device(priv)->chipset == 0x40) {
+ nv_wr32(priv, 0x4009b0, 0x83280fff);
+ nv_wr32(priv, 0x4009b4, 0x000000a0);
} else {
- nv_wr32(dev, 0x400820, 0x83280eff);
- nv_wr32(dev, 0x400824, 0x000000a0);
+ nv_wr32(priv, 0x400820, 0x83280eff);
+ nv_wr32(priv, 0x400824, 0x000000a0);
}
- switch (dev_priv->chipset) {
+ switch (nv_device(priv)->chipset) {
case 0x40:
case 0x45:
- nv_wr32(dev, 0x4009b8, 0x0078e366);
- nv_wr32(dev, 0x4009bc, 0x0000014c);
+ nv_wr32(priv, 0x4009b8, 0x0078e366);
+ nv_wr32(priv, 0x4009bc, 0x0000014c);
break;
case 0x41:
case 0x42: /* pciid also 0x00Cx */
/* case 0x0120: XXX (pciid) */
- nv_wr32(dev, 0x400828, 0x007596ff);
- nv_wr32(dev, 0x40082c, 0x00000108);
+ nv_wr32(priv, 0x400828, 0x007596ff);
+ nv_wr32(priv, 0x40082c, 0x00000108);
break;
case 0x43:
- nv_wr32(dev, 0x400828, 0x0072cb77);
- nv_wr32(dev, 0x40082c, 0x00000108);
+ nv_wr32(priv, 0x400828, 0x0072cb77);
+ nv_wr32(priv, 0x40082c, 0x00000108);
break;
case 0x44:
case 0x46: /* G72 */
case 0x4a:
case 0x4c: /* G7x-based C51 */
case 0x4e:
- nv_wr32(dev, 0x400860, 0);
- nv_wr32(dev, 0x400864, 0);
+ nv_wr32(priv, 0x400860, 0);
+ nv_wr32(priv, 0x400864, 0);
break;
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
- nv_wr32(dev, 0x400828, 0x07830610);
- nv_wr32(dev, 0x40082c, 0x0000016A);
+ nv_wr32(priv, 0x400828, 0x07830610);
+ nv_wr32(priv, 0x40082c, 0x0000016A);
break;
default:
break;
}
- nv_wr32(dev, 0x400b38, 0x2ffff800);
- nv_wr32(dev, 0x400b3c, 0x00006000);
+ nv_wr32(priv, 0x400b38, 0x2ffff800);
+ nv_wr32(priv, 0x400b3c, 0x00006000);
/* Tiling related stuff. */
- switch (dev_priv->chipset) {
+ switch (nv_device(priv)->chipset) {
case 0x44:
case 0x4a:
- nv_wr32(dev, 0x400bc4, 0x1003d888);
- nv_wr32(dev, 0x400bbc, 0xb7a7b500);
+ nv_wr32(priv, 0x400bc4, 0x1003d888);
+ nv_wr32(priv, 0x400bbc, 0xb7a7b500);
break;
case 0x46:
- nv_wr32(dev, 0x400bc4, 0x0000e024);
- nv_wr32(dev, 0x400bbc, 0xb7a7b520);
+ nv_wr32(priv, 0x400bc4, 0x0000e024);
+ nv_wr32(priv, 0x400bbc, 0xb7a7b520);
break;
case 0x4c:
case 0x4e:
case 0x67:
- nv_wr32(dev, 0x400bc4, 0x1003d888);
- nv_wr32(dev, 0x400bbc, 0xb7a7b540);
+ nv_wr32(priv, 0x400bc4, 0x1003d888);
+ nv_wr32(priv, 0x400bbc, 0xb7a7b540);
break;
default:
break;
}
/* Turn all the tiling regions off. */
- for (i = 0; i < nvfb_tile_nr(dev); i++)
- nv40_graph_set_tile_region(dev, i);
+ for (i = 0; i < pfb->tile.regions; i++)
+ engine->tile_prog(engine, i);
/* begin RAM config */
- vramsz = pci_resource_len(dev->pdev, 0) - 1;
- switch (dev_priv->chipset) {
+ vramsz = pci_resource_len(nv_device(priv)->pdev, 0) - 1;
+ switch (nv_device(priv)->chipset) {
case 0x40:
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400820, 0);
- nv_wr32(dev, 0x400824, 0);
- nv_wr32(dev, 0x400864, vramsz);
- nv_wr32(dev, 0x400868, vramsz);
+ nv_wr32(priv, 0x4009A4, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4009A8, nv_rd32(priv, 0x100204));
+ nv_wr32(priv, 0x4069A4, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4069A8, nv_rd32(priv, 0x100204));
+ nv_wr32(priv, 0x400820, 0);
+ nv_wr32(priv, 0x400824, 0);
+ nv_wr32(priv, 0x400864, vramsz);
+ nv_wr32(priv, 0x400868, vramsz);
break;
default:
- switch (dev_priv->chipset) {
+ switch (nv_device(priv)->chipset) {
case 0x41:
case 0x42:
case 0x43:
@@ -304,163 +455,33 @@ nv40_graph_init(struct drm_device *dev, int engine)
case 0x4e:
case 0x44:
case 0x4a:
- nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(priv, 0x4009F0, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4009F4, nv_rd32(priv, 0x100204));
break;
default:
- nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(priv, 0x400DF0, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x400DF4, nv_rd32(priv, 0x100204));
break;
}
- nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400840, 0);
- nv_wr32(dev, 0x400844, 0);
- nv_wr32(dev, 0x4008A0, vramsz);
- nv_wr32(dev, 0x4008A4, vramsz);
+ nv_wr32(priv, 0x4069F0, nv_rd32(priv, 0x100200));
+ nv_wr32(priv, 0x4069F4, nv_rd32(priv, 0x100204));
+ nv_wr32(priv, 0x400840, 0);
+ nv_wr32(priv, 0x400844, 0);
+ nv_wr32(priv, 0x4008A0, vramsz);
+ nv_wr32(priv, 0x4008A4, vramsz);
break;
}
return 0;
}
-static int
-nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- u32 inst = nv_rd32(dev, 0x40032c);
- if (inst & 0x01000000) {
- nv_wr32(dev, 0x400720, 0x00000000);
- nv_wr32(dev, 0x400784, inst);
- nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
- nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
- u32 insn = nv_rd32(dev, 0x400308);
- NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
- }
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- }
- return 0;
-}
-
-static int
-nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
- grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
-
- if (grctx && grctx->addr == inst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nv40_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
- u32 chid = nv40_graph_isr_chid(dev, inst);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- } else
- if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- nv_mask(dev, 0x402000, 0, 0);
- }
- }
-
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- }
-}
-
-static void
-nv40_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 12);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
-}
-
-int
-nv40_graph_create(struct drm_device *dev)
-{
- struct nv40_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv40_graph_destroy;
- pgraph->base.init = nv40_graph_init;
- pgraph->base.fini = nv40_graph_fini;
- pgraph->base.context_new = nv40_graph_context_new;
- pgraph->base.context_del = nv40_graph_context_del;
- pgraph->base.object_new = nv40_graph_object_new;
- pgraph->base.set_tile_region = nv40_graph_set_tile_region;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv40_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
- /* curie */
- if (nv44_graph_class(dev))
- NVOBJ_CLASS(dev, 0x4497, GR);
- else
- NVOBJ_CLASS(dev, 0x4097, GR);
-
- return 0;
-}
+struct nouveau_oclass
+nv40_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_graph_ctor,
+ .dtor = _nouveau_graph_dtor,
+ .init = nv40_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
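
nv40_graph_tile_prog() above brackets the tile-register writes with pfifo->pause()/pfifo->start() and an nv04_graph_idle() wait, so PGRAPH is quiescent while the NV20/NV40/NV47 TSIZE/TLIMIT/TILE registers are reprogrammed. The runnable sketch below only models that pause/idle/program/resume discipline; the fifo_* and gr_* helpers are stand-ins invented for the example.

/* Hypothetical sketch of the pause/program/resume discipline used above. */
#include <stdbool.h>
#include <stdio.h>

struct tile { unsigned pitch, limit, addr; };

/* invented stand-ins for pfifo->pause(), nv04_graph_idle() and the
 * nv_wr32() register writes; real hardware access is out of scope here */
static bool fifo_pause(void)   { return true; }
static void fifo_start(void)   { }
static bool gr_wait_idle(void) { return true; }
static void gr_write_tile_regs(int i, const struct tile *t)
{
        printf("region %d: pitch 0x%x limit 0x%x addr 0x%x\n",
               i, t->pitch, t->limit, t->addr);
}

static int tile_prog(int region, const struct tile *t)
{
        if (!fifo_pause())             /* stop new methods reaching PGRAPH */
                return -1;
        if (!gr_wait_idle()) {         /* drain work already in flight */
                fifo_start();
                return -1;
        }
        gr_write_tile_regs(region, t); /* safe: the engine is quiescent */
        fifo_start();                  /* resume command submission */
        return 0;
}

int main(void)
{
        struct tile t = { .pitch = 0x100, .limit = 0x0fffffff, .addr = 0 };
        return tile_prog(0, &t);
}
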
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
new file mode 100644
index 000000000000..d2ac975afc2e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -0,0 +1,21 @@
+#ifndef __NV40_GRAPH_H__
+#define __NV40_GRAPH_H__
+
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(void *priv)
+{
+ struct nouveau_device *device = nv_device(priv);
+
+ if ((device->chipset & 0xf0) == 0x60)
+ return 1;
+
+ return !(0x0baf & (1 << (device->chipset & 0x0f)));
+}
+
+void nv40_grctx_init(struct nouveau_device *, u32 *size);
+void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
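
nv44_graph_class() above decides whether a board uses the 0x4497 flavour of curie: any 0x6x chipset qualifies, and for 0x4x chipsets the 0x0baf mask excludes the low digits it has set. nv40_graph_ctor() uses the result to pick nv44_graph_sclass over nv40_graph_sclass. The standalone program below simply re-evaluates the same expression for a few example chipsets and prints which curie class the ctor above would end up exposing.

/* Standalone re-evaluation of the nv44_graph_class() test above;
 * the chipset ids in main() are just examples. */
#include <stdio.h>

static int nv44_class(unsigned chipset)
{
        if ((chipset & 0xf0) == 0x60)
                return 1;
        return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
        unsigned examples[] = { 0x40, 0x44, 0x46, 0x47, 0x4a, 0x4e, 0x63 };
        unsigned i;

        for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
                printf("chipset 0x%02x -> %s\n", examples[i],
                       nv44_class(examples[i]) ? "0x4497" : "0x4097");
        return 0;
}
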
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 28932c4662e9..8955bdd3551c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -1,266 +1,234 @@
/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
+ * Copyright 2012 Red Hat Inc.
*
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include <core/ramht.h>
-#include "nouveau_dma.h"
-#include "nv50_evo.h"
-
-struct nv50_graph_engine {
- struct nouveau_exec_engine base;
- u32 ctxprog[512];
- u32 ctxprog_size;
- u32 grctx_size;
-};
-
-static int
-nv50_graph_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- u32 units = nv_rd32(dev, 0x001540);
- int i;
+#include <core/os.h>
+#include <core/class.h>
+#include <core/handle.h>
+#include <core/engctx.h>
+#include <core/enum.h>
- NV_DEBUG(dev, "\n");
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/timer.h>
- /* master reset */
- nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
- nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
-
- /* reset/enable traps and interrupts */
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x402000, 0xc0000000);
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
+#include <engine/graph.h>
- if (dev_priv->chipset < 0xa0) {
- nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
- } else {
- nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
- }
- }
+#include "nv50.h"
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
- nv_wr32(dev, 0x400500, 0x00010001);
+struct nv50_graph_priv {
+ struct nouveau_graph base;
+ spinlock_t lock;
+ u32 size;
+};
- /* upload context program, initialise ctxctl defaults */
- nv_wr32(dev, 0x400324, 0x00000000);
- for (i = 0; i < pgraph->ctxprog_size; i++)
- nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
- nv_wr32(dev, 0x400824, 0x00000000);
- nv_wr32(dev, 0x400828, 0x00000000);
- nv_wr32(dev, 0x40082c, 0x00000000);
- nv_wr32(dev, 0x400830, 0x00000000);
- nv_wr32(dev, 0x400724, 0x00000000);
- nv_wr32(dev, 0x40032c, 0x00000000);
- nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+struct nv50_graph_chan {
+ struct nouveau_graph_chan base;
+};
- /* some unknown zcull magic */
- switch (dev_priv->chipset & 0xf0) {
- case 0x50:
- case 0x80:
- case 0x90:
- nv_wr32(dev, 0x402ca8, 0x00000800);
- break;
- case 0xa0:
- default:
- nv_wr32(dev, 0x402cc0, 0x00000000);
- if (dev_priv->chipset == 0xa0 ||
- dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac) {
- nv_wr32(dev, 0x402ca8, 0x00000802);
- } else {
- nv_wr32(dev, 0x402cc0, 0x00000000);
- nv_wr32(dev, 0x402ca8, 0x00000002);
- }
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
- break;
- }
+static int
+nv50_graph_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nouveau_gpuobj *obj;
+ int ret;
- /* zero out zcull regions */
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
- }
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
+ if (ret)
+ return ret;
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
-static int
-nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_wr32(dev, 0x40013c, 0x00000000);
- return 0;
-}
+struct nouveau_ofuncs
+nv50_graph_ofuncs = {
+ .ctor = nv50_graph_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
-static int
-nv50_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *grctx = NULL;
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- int hdr, ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &grctx);
- if (ret)
- return ret;
+static struct nouveau_oclass
+nv50_graph_sclass[] = {
+ { 0x0030, &nv50_graph_ofuncs },
+ { 0x502d, &nv50_graph_ofuncs },
+ { 0x5039, &nv50_graph_ofuncs },
+ { 0x5097, &nv50_graph_ofuncs },
+ { 0x50c0, &nv50_graph_ofuncs },
+ {}
+};
- hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- nv_wo32(ramin, hdr + 0x00, 0x00190002);
- nv_wo32(ramin, hdr + 0x04, grctx->addr + grctx->size - 1);
- nv_wo32(ramin, hdr + 0x08, grctx->addr);
- nv_wo32(ramin, hdr + 0x0c, 0);
- nv_wo32(ramin, hdr + 0x10, 0);
- nv_wo32(ramin, hdr + 0x14, 0x00010000);
+static struct nouveau_oclass
+nv84_graph_sclass[] = {
+ { 0x0030, &nv50_graph_ofuncs },
+ { 0x502d, &nv50_graph_ofuncs },
+ { 0x5039, &nv50_graph_ofuncs },
+ { 0x50c0, &nv50_graph_ofuncs },
+ { 0x8297, &nv50_graph_ofuncs },
+ {}
+};
- nv50_grctx_fill(dev, grctx);
- nv_wo32(grctx, 0x00000, chan->ramin->addr >> 12);
+static struct nouveau_oclass
+nva0_graph_sclass[] = {
+ { 0x0030, &nv50_graph_ofuncs },
+ { 0x502d, &nv50_graph_ofuncs },
+ { 0x5039, &nv50_graph_ofuncs },
+ { 0x50c0, &nv50_graph_ofuncs },
+ { 0x8397, &nv50_graph_ofuncs },
+ {}
+};
- nvimem_flush(dev);
+static struct nouveau_oclass
+nva3_graph_sclass[] = {
+ { 0x0030, &nv50_graph_ofuncs },
+ { 0x502d, &nv50_graph_ofuncs },
+ { 0x5039, &nv50_graph_ofuncs },
+ { 0x50c0, &nv50_graph_ofuncs },
+ { 0x8597, &nv50_graph_ofuncs },
+ { 0x85c0, &nv50_graph_ofuncs },
+ {}
+};
- nvvm_engref(chan->vm, engine, 1);
- chan->engctx[NVOBJ_ENGINE_GR] = grctx;
- return 0;
-}
+static struct nouveau_oclass
+nvaf_graph_sclass[] = {
+ { 0x0030, &nv50_graph_ofuncs },
+ { 0x502d, &nv50_graph_ofuncs },
+ { 0x5039, &nv50_graph_ofuncs },
+ { 0x50c0, &nv50_graph_ofuncs },
+ { 0x85c0, &nv50_graph_ofuncs },
+ { 0x8697, &nv50_graph_ofuncs },
+ {}
+};
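/* For reference (editorial aside, not part of the patch): the class ids in
 * these sclass tables correspond to the labels the removed NVOBJ_CLASS()
 * registrations later in this diff gave them -- 0x0030 null, 0x502d 2d,
 * 0x5039 m2mf, 0x5097/0x8297/0x8397/0x8597/0x8697 the per-chipset tesla
 * classes, and 0x50c0/0x85c0 compute.
 */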
-static void
-nv50_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-
- for (i = hdr; i < hdr + 24; i += 4)
- nv_wo32(chan->ramin, i, 0);
- nvimem_flush(dev);
-
- nvvm_engref(chan->vm, engine, -1);
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
-}
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
static int
-nv50_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+nv50_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
+ struct nv50_graph_priv *priv = (void *)engine;
+ struct nv50_graph_chan *chan;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+ priv->size, 0,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
if (ret)
return ret;
- obj->engine = 1;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- nvimem_flush(dev);
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ nv50_grctx_fill(nv_device(priv), nv_gpuobj(chan));
+ return 0;
}
-static void
-nv50_graph_tlb_flush(struct drm_device *dev, int engine)
+static struct nouveau_oclass
+nv50_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_graph_context_ctor,
+ .dtor = _nouveau_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_graph_tlb_flush(struct nouveau_engine *engine)
{
- nv50_vm_flush_engine(dev, 0);
+ nv50_vm_flush_engine(&engine->base, 0x00);
+ return 0;
}
-static void
-nv84_graph_tlb_flush(struct drm_device *dev, int engine)
+static int
+nv84_graph_tlb_flush(struct nouveau_engine *engine)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer *ptimer = nouveau_timer(engine);
+ struct nv50_graph_priv *priv = (void *)engine;
bool idle, timeout = false;
unsigned long flags;
u64 start;
u32 tmp;
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
+ spin_lock_irqsave(&priv->lock, flags);
+ nv_mask(priv, 0x400500, 0x00000001, 0x00000000);
- start = nv_timer_read(dev);
+ start = ptimer->read(ptimer);
do {
idle = true;
- for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
+ for (tmp = nv_rd32(priv, 0x400380); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
- for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
+ for (tmp = nv_rd32(priv, 0x400384); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
- for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
+ for (tmp = nv_rd32(priv, 0x400388); tmp && idle; tmp >>= 3) {
if ((tmp & 7) == 1)
idle = false;
}
- } while (!idle && !(timeout = nv_timer_read(dev) - start > 2000000000));
+ } while (!idle &&
+ !(timeout = ptimer->read(ptimer) - start > 2000000000));
if (timeout) {
- NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
+ nv_error(priv, "PGRAPH TLB flush idle timeout fail: "
"0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
- nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
+ nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380),
+ nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388));
}
- nv50_vm_flush_engine(dev, 0);
+ nv50_vm_flush_engine(&engine->base, 0x00);
- nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+ nv_mask(priv, 0x400500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return timeout ? -EBUSY : 0;
}
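/* Rough sketch of the wait loop above (editorial, hedged): PGRAPH fetching is
 * paused by clearing bit 0 of 0x400500, then the status words at 0x400380,
 * 0x400384 and 0x400388 are scanned in 3-bit groups, any group equal to 1
 * meaning a unit is still busy.  The 2000000000 comparison against the
 * ptimer delta amounts to roughly a two-second timeout, assuming the timer
 * counts nanoseconds (an assumption here, not stated by the patch).
 */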
static struct nouveau_enum nv50_mp_exec_error_names[] = {
@@ -341,7 +309,7 @@ struct nouveau_enum nv50_data_error_names[] = {
{}
};
-static struct nouveau_bitfield nv50_graph_intr[] = {
+static struct nouveau_bitfield nv50_graph_intr_name[] = {
{ 0x00000001, "NOTIFY" },
{ 0x00000002, "COMPUTE_QUERY" },
{ 0x00000010, "ILLEGAL_MTHD" },
@@ -356,95 +324,93 @@ static struct nouveau_bitfield nv50_graph_intr[] = {
};
static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
- uint32_t addr, mp10, status, pc, oplow, ophigh;
+ u32 units = nv_rd32(priv, 0x1540);
+ u32 addr, mp10, status, pc, oplow, ophigh;
int i;
int mps = 0;
for (i = 0; i < 4; i++) {
if (!(units & 1 << (i+24)))
continue;
- if (dev_priv->chipset < 0xa0)
+ if (nv_device(priv)->chipset < 0xa0)
addr = 0x408200 + (tpid << 12) + (i << 7);
else
addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(dev, addr + 0x10);
- status = nv_rd32(dev, addr + 0x14);
+ mp10 = nv_rd32(priv, addr + 0x10);
+ status = nv_rd32(priv, addr + 0x14);
if (!status)
continue;
if (display) {
- nv_rd32(dev, addr + 0x20);
- pc = nv_rd32(dev, addr + 0x24);
- oplow = nv_rd32(dev, addr + 0x70);
- ophigh = nv_rd32(dev, addr + 0x74);
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ nv_rd32(priv, addr + 0x20);
+ pc = nv_rd32(priv, addr + 0x24);
+ oplow = nv_rd32(priv, addr + 0x70);
+ ophigh = nv_rd32(priv, addr + 0x74);
+ nv_error(priv, "TRAP_MP_EXEC - "
"TP %d MP %d: ", tpid, i);
nouveau_enum_print(nv50_mp_exec_error_names, status);
printk(" at %06x warp %d, opcode %08x %08x\n",
pc&0xffffff, pc >> 24,
oplow, ophigh);
}
- nv_wr32(dev, addr + 0x10, mp10);
- nv_wr32(dev, addr + 0x14, 0);
+ nv_wr32(priv, addr + 0x10, mp10);
+ nv_wr32(priv, addr + 0x14, 0);
mps++;
}
if (!mps && display)
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ nv_error(priv, "TRAP_MP_EXEC - TP %d: "
"No MPs claiming errors?\n", tpid);
}
static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
- uint32_t ustatus_new, int display, const char *name)
+nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
+ u32 ustatus_new, int display, const char *name)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int tps = 0;
- uint32_t units = nv_rd32(dev, 0x1540);
+ u32 units = nv_rd32(priv, 0x1540);
int i, r;
- uint32_t ustatus_addr, ustatus;
+ u32 ustatus_addr, ustatus;
for (i = 0; i < 16; i++) {
if (!(units & (1 << i)))
continue;
- if (dev_priv->chipset < 0xa0)
+ if (nv_device(priv)->chipset < 0xa0)
ustatus_addr = ustatus_old + (i << 12);
else
ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ ustatus = nv_rd32(priv, ustatus_addr) & 0x7fffffff;
if (!ustatus)
continue;
tps++;
switch (type) {
case 6: /* texture error... unknown for now */
if (display) {
- NV_ERROR(dev, "magic set %d:\n", i);
+ nv_error(priv, "magic set %d:\n", i);
for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
+ nv_error(priv, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(priv, r));
}
break;
case 7: /* MP error */
if (ustatus & 0x04030000) {
- nv50_pgraph_mp_trap(dev, i, display);
+ nv50_priv_mp_trap(priv, i, display);
ustatus &= ~0x04030000;
}
break;
case 8: /* TPDMA error */
{
- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ u32 e0c = nv_rd32(priv, ustatus_addr + 4);
+ u32 e10 = nv_rd32(priv, ustatus_addr + 8);
+ u32 e14 = nv_rd32(priv, ustatus_addr + 0xc);
+ u32 e18 = nv_rd32(priv, ustatus_addr + 0x10);
+ u32 e1c = nv_rd32(priv, ustatus_addr + 0x14);
+ u32 e20 = nv_rd32(priv, ustatus_addr + 0x18);
+ u32 e24 = nv_rd32(priv, ustatus_addr + 0x1c);
/* 2d engine destination */
if (ustatus & 0x00000010) {
if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ nv_error(priv, "TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ nv_error(priv, "TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000010;
@@ -452,9 +418,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
/* Render target */
if (ustatus & 0x00000040) {
if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ nv_error(priv, "TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ nv_error(priv, "TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000040;
@@ -464,19 +430,19 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
if (display) {
if (e18 & 0x80000000) {
/* g[] read fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ nv_error(priv, "TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 24) & 0x1f));
e18 &= ~0x1f000000;
} else if (e18 & 0xc) {
/* g[] write fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ nv_error(priv, "TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
i, e14, e10 | ((e18 >> 7) & 0x1f));
e18 &= ~0x00000f80;
} else {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ nv_error(priv, "TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
i, e14, e10);
}
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ nv_error(priv, "TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
i, e0c, e18, e1c, e20, e24);
}
ustatus &= ~0x00000080;
@@ -486,23 +452,23 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
}
if (ustatus) {
if (display)
- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
}
- nv_wr32(dev, ustatus_addr, 0xc0000000);
+ nv_wr32(priv, ustatus_addr, 0xc0000000);
}
if (!tps && display)
- NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+ nv_info(priv, "%s - No TPs claiming errors?\n", name);
}
static int
-nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, u64 inst)
{
- u32 status = nv_rd32(dev, 0x400108);
+ u32 status = nv_rd32(priv, 0x400108);
u32 ustatus;
if (!status && display) {
- NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+ nv_error(priv, "TRAP: no units reporting traps?\n");
return 1;
}
@@ -510,72 +476,72 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
* COND, QUERY. If you get a trap from it, the command is still stuck
* in DISPATCH and you need to do something about it. */
if (status & 0x001) {
- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ ustatus = nv_rd32(priv, 0x400804) & 0x7fffffff;
if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ nv_error(priv, "TRAP_DISPATCH - no ustatus?\n");
}
- nv_wr32(dev, 0x400500, 0x00000000);
+ nv_wr32(priv, 0x400500, 0x00000000);
/* Known to be triggered by screwed up NOTIFY and COND... */
if (ustatus & 0x00000001) {
- u32 addr = nv_rd32(dev, 0x400808);
+ u32 addr = nv_rd32(priv, 0x400808);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 datal = nv_rd32(dev, 0x40080c);
- u32 datah = nv_rd32(dev, 0x400810);
- u32 class = nv_rd32(dev, 0x400814);
- u32 r848 = nv_rd32(dev, 0x400848);
+ u32 datal = nv_rd32(priv, 0x40080c);
+ u32 datah = nv_rd32(priv, 0x400810);
+ u32 class = nv_rd32(priv, 0x400814);
+ u32 r848 = nv_rd32(priv, 0x400848);
- NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+ nv_error(priv, "TRAP DISPATCH_FAULT\n");
if (display && (addr & 0x80000000)) {
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ nv_error(priv, "ch 0x%010llx "
"subc %d class 0x%04x mthd 0x%04x "
"data 0x%08x%08x "
"400808 0x%08x 400848 0x%08x\n",
- chid, inst, subc, class, mthd, datah,
+ inst, subc, class, mthd, datah,
datal, addr, r848);
} else
if (display) {
- NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ nv_error(priv, "no stuck command?\n");
}
- nv_wr32(dev, 0x400808, 0);
- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
- nv_wr32(dev, 0x400848, 0);
+ nv_wr32(priv, 0x400808, 0);
+ nv_wr32(priv, 0x4008e8, nv_rd32(priv, 0x4008e8) & 3);
+ nv_wr32(priv, 0x400848, 0);
ustatus &= ~0x00000001;
}
if (ustatus & 0x00000002) {
- u32 addr = nv_rd32(dev, 0x40084c);
+ u32 addr = nv_rd32(priv, 0x40084c);
u32 subc = (addr & 0x00070000) >> 16;
u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, 0x40085c);
- u32 class = nv_rd32(dev, 0x400814);
+ u32 data = nv_rd32(priv, 0x40085c);
+ u32 class = nv_rd32(priv, 0x400814);
- NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+ nv_error(priv, "TRAP DISPATCH_QUERY\n");
if (display && (addr & 0x80000000)) {
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ nv_error(priv, "ch 0x%010llx "
"subc %d class 0x%04x mthd 0x%04x "
"data 0x%08x 40084c 0x%08x\n",
- chid, inst, subc, class, mthd,
+ inst, subc, class, mthd,
data, addr);
} else
if (display) {
- NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ nv_error(priv, "no stuck command?\n");
}
- nv_wr32(dev, 0x40084c, 0);
+ nv_wr32(priv, 0x40084c, 0);
ustatus &= ~0x00000002;
}
if (ustatus && display) {
- NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+ nv_error(priv, "TRAP_DISPATCH (unknown "
"0x%08x)\n", ustatus);
}
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x001);
+ nv_wr32(priv, 0x400804, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x001);
status &= ~0x001;
if (!status)
return 0;
@@ -583,81 +549,81 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
/* M2MF: Memory to memory copy engine. */
if (status & 0x002) {
- u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ u32 ustatus = nv_rd32(priv, 0x406800) & 0x7fffffff;
if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+ nv_error(priv, "TRAP_M2MF");
nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
- nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+ nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
+ nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
+ nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
}
/* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 2);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x002);
+ nv_wr32(priv, 0x400040, 2);
+ nv_wr32(priv, 0x400040, 0);
+ nv_wr32(priv, 0x406800, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x002);
status &= ~0x002;
}
/* VFETCH: Fetches data from vertex buffers. */
if (status & 0x004) {
- u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ u32 ustatus = nv_rd32(priv, 0x400c04) & 0x7fffffff;
if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+ nv_error(priv, "TRAP_VFETCH");
nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
- nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+ nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
+ nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
+ nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
}
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x004);
+ nv_wr32(priv, 0x400c04, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x004);
status &= ~0x004;
}
/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
if (status & 0x008) {
- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ ustatus = nv_rd32(priv, 0x401800) & 0x7fffffff;
if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+ nv_error(priv, "TRAP_STRMOUT");
nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
- nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+ nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
+ nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
}
/* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 0x80);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x008);
+ nv_wr32(priv, 0x400040, 0x80);
+ nv_wr32(priv, 0x400040, 0);
+ nv_wr32(priv, 0x401800, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x008);
status &= ~0x008;
}
/* CCACHE: Handles code and c[] caches and fills them. */
if (status & 0x010) {
- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ ustatus = nv_rd32(priv, 0x405018) & 0x7fffffff;
if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nv_error(priv, "TRAP_CCACHE");
nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
" %08x %08x %08x\n",
- nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
- nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
- nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
- nv_rd32(dev, 0x40501c));
+ nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
+ nv_rd32(priv, 0x405008), nv_rd32(priv, 0x40500c),
+ nv_rd32(priv, 0x405010), nv_rd32(priv, 0x405014),
+ nv_rd32(priv, 0x40501c));
}
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x010);
+ nv_wr32(priv, 0x405018, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x010);
status &= ~0x010;
}
@@ -665,201 +631,248 @@ nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid
* remaining, so try to handle it anyway. Perhaps related to that
* unknown DMA slot on tesla? */
if (status & 0x20) {
- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ ustatus = nv_rd32(priv, 0x402000) & 0x7fffffff;
if (display)
- NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
- nv_wr32(dev, 0x402000, 0xc0000000);
+ nv_error(priv, "TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(priv, 0x402000, 0xc0000000);
		/* no status modification on purpose */
}
/* TEXTURE: CUDA texturing units */
if (status & 0x040) {
- nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
- "PGRAPH - TRAP_TEXTURE");
- nv_wr32(dev, 0x400108, 0x040);
+ nv50_priv_tp_trap(priv, 6, 0x408900, 0x408600, display,
+ "TRAP_TEXTURE");
+ nv_wr32(priv, 0x400108, 0x040);
status &= ~0x040;
}
/* MP: CUDA execution engines. */
if (status & 0x080) {
- nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
- "PGRAPH - TRAP_MP");
- nv_wr32(dev, 0x400108, 0x080);
+ nv50_priv_tp_trap(priv, 7, 0x408314, 0x40831c, display,
+ "TRAP_MP");
+ nv_wr32(priv, 0x400108, 0x080);
status &= ~0x080;
}
/* TPDMA: Handles TP-initiated uncached memory accesses:
* l[], g[], stack, 2d surfaces, render targets. */
if (status & 0x100) {
- nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
- "PGRAPH - TRAP_TPDMA");
- nv_wr32(dev, 0x400108, 0x100);
+ nv50_priv_tp_trap(priv, 8, 0x408e08, 0x408708, display,
+ "TRAP_TPDMA");
+ nv_wr32(priv, 0x400108, 0x100);
status &= ~0x100;
}
if (status) {
if (display)
- NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
- nv_wr32(dev, 0x400108, status);
+ nv_error(priv, "TRAP: unknown 0x%08x\n", status);
+ nv_wr32(priv, 0x400108, status);
}
return 1;
}
-int
-nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+static void
+nv50_graph_intr(struct nouveau_subdev *subdev)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
+ struct nv50_graph_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u32 stat = nv_rd32(priv, 0x400100);
+ u64 inst = (u64)(nv_rd32(priv, 0x40032c) & 0x0fffffff) << 12;
+ u32 addr = nv_rd32(priv, 0x400704);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(priv, 0x400708);
+ u32 class = nv_rd32(priv, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ handle = nouveau_engctx_lookup_class(engine, inst, class);
+ if (handle && !nv_call(handle->object, mthd, data))
+ show &= ~0x00000010;
+ nouveau_engctx_handle_put(handle);
+ }
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
+ if (show & 0x00100000) {
+ u32 ecode = nv_rd32(priv, 0x400110);
+ nv_error(priv, "DATA_ERROR ");
+ nouveau_enum_print(nv50_data_error_names, ecode);
+ printk("\n");
+ }
- if (inst == chan->ramin->addr)
- break;
+ if (stat & 0x00200000) {
+ if (!nv50_graph_trap_handler(priv, show, inst))
+ show &= ~0x00200000;
+ }
+
+ nv_wr32(priv, 0x400100, stat);
+ nv_wr32(priv, 0x400500, 0x00010001);
+
+ if (show) {
+ nv_info(priv, "");
+ nouveau_bitfield_print(nv50_graph_intr_name, show);
+ printk("\n");
+ nv_error(priv, "ch 0x%010llx subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ nv50_fb_trap(nouveau_fb(priv), 1);
}
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
+
+ if (nv_rd32(priv, 0x400824) & (1 << 31))
+ nv_wr32(priv, 0x400824, nv_rd32(priv, 0x400824) & ~(1 << 31));
}
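/* Worked example of the trap decode above (hypothetical register value): if
 * 0x400704 reads back 0x00050184, then subc = (0x00050184 & 0x00070000) >> 16
 * = 5 and mthd = 0x00050184 & 0x00001ffc = 0x0184; the method argument is
 * read from 0x400708 and the object class from 0x400814.
 */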
-static void
-nv50_graph_isr(struct drm_device *dev)
+static int
+nv50_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- u32 stat;
-
- while ((stat = nv_rd32(dev, 0x400100))) {
- u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
- u32 chid = nv50_graph_isr_chid(dev, inst);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400814);
- u32 show = stat;
-
- if (stat & 0x00000010) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
- mthd, data))
- show &= ~0x00000010;
- }
+ struct nv50_graph_priv *priv;
+ int ret;
- show = (show && nouveau_ratelimit()) ? show : 0;
+ ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- if (show & 0x00100000) {
- u32 ecode = nv_rd32(dev, 0x400110);
- NV_INFO(dev, "PGRAPH - DATA_ERROR ");
- nouveau_enum_print(nv50_data_error_names, ecode);
- printk("\n");
- }
+ nv_subdev(priv)->unit = 0x00201000;
+ nv_subdev(priv)->intr = nv50_graph_intr;
+ nv_engine(priv)->cclass = &nv50_graph_cclass;
- if (stat & 0x00200000) {
- if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
- show &= ~0x00200000;
- }
+ switch (nv_device(priv)->chipset) {
+ case 0x50:
+ nv_engine(priv)->sclass = nv50_graph_sclass;
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ nv_engine(priv)->sclass = nv84_graph_sclass;
+ break;
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ nv_engine(priv)->sclass = nva0_graph_sclass;
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_engine(priv)->sclass = nva3_graph_sclass;
+ break;
+ case 0xaf:
+ nv_engine(priv)->sclass = nvaf_graph_sclass;
+ break;
- nv_wr32(dev, 0x400100, stat);
- nv_wr32(dev, 0x400500, 0x00010001);
+ };
- if (show) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv50_graph_intr, show);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv50_fb_vm_trap(dev, 1);
- }
- }
+ if (nv_device(priv)->chipset == 0x50 ||
+ nv_device(priv)->chipset == 0xac)
+ nv_engine(priv)->tlb_flush = nv50_graph_tlb_flush;
+ else
+ nv_engine(priv)->tlb_flush = nv84_graph_tlb_flush;
- if (nv_rd32(dev, 0x400824) & (1 << 31))
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ spin_lock_init(&priv->lock);
+ return 0;
}
-static void
-nv50_graph_destroy(struct drm_device *dev, int engine)
+static int
+nv50_graph_init(struct nouveau_object *object)
{
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+ struct nv50_graph_priv *priv = (void *)object;
+ int ret, units, i;
- NVOBJ_ENGINE_DEL(dev, GR);
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
- nouveau_irq_unregister(dev, 12);
- kfree(pgraph);
-}
+ /* NV_PGRAPH_DEBUG_3_HW_CTX_SWITCH_ENABLED */
+ nv_wr32(priv, 0x40008c, 0x00000004);
-int
-nv50_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_graph_engine *pgraph;
- int ret;
+ /* reset/enable traps and interrupts */
+ nv_wr32(priv, 0x400804, 0xc0000000);
+ nv_wr32(priv, 0x406800, 0xc0000000);
+ nv_wr32(priv, 0x400c04, 0xc0000000);
+ nv_wr32(priv, 0x401800, 0xc0000000);
+ nv_wr32(priv, 0x405018, 0xc0000000);
+ nv_wr32(priv, 0x402000, 0xc0000000);
+
+ units = nv_rd32(priv, 0x001540);
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
- pgraph = kzalloc(sizeof(*pgraph),GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
- &pgraph->ctxprog_size,
- &pgraph->grctx_size);
- if (ret) {
- NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
- kfree(pgraph);
- return 0;
+ if (nv_device(priv)->chipset < 0xa0) {
+ nv_wr32(priv, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(priv, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(priv, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(priv, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(priv, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(priv, 0x40831c + (i << 11), 0xc0000000);
+ }
}
- pgraph->base.destroy = nv50_graph_destroy;
- pgraph->base.init = nv50_graph_init;
- pgraph->base.fini = nv50_graph_fini;
- pgraph->base.context_new = nv50_graph_context_new;
- pgraph->base.context_del = nv50_graph_context_del;
- pgraph->base.object_new = nv50_graph_object_new;
- if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
- pgraph->base.tlb_flush = nv50_graph_tlb_flush;
- else
- pgraph->base.tlb_flush = nv84_graph_tlb_flush;
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
+ nv_wr32(priv, 0x400500, 0x00010001);
- nouveau_irq_register(dev, 12, nv50_graph_isr);
+ /* upload context program, initialise ctxctl defaults */
+ ret = nv50_grctx_init(nv_device(priv), &priv->size);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+ nv_wr32(priv, 0x400824, 0x00000000);
+ nv_wr32(priv, 0x400828, 0x00000000);
+ nv_wr32(priv, 0x40082c, 0x00000000);
+ nv_wr32(priv, 0x400830, 0x00000000);
+ nv_wr32(priv, 0x400724, 0x00000000);
+ nv_wr32(priv, 0x40032c, 0x00000000);
+ nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
- /* tesla */
- if (dev_priv->chipset == 0x50)
- NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
- else
- if (dev_priv->chipset < 0xa0)
- NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
- else {
- switch (dev_priv->chipset) {
- case 0xa0:
- case 0xaa:
- case 0xac:
- NVOBJ_CLASS(dev, 0x8397, GR);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- NVOBJ_CLASS(dev, 0x8597, GR);
- break;
- case 0xaf:
- NVOBJ_CLASS(dev, 0x8697, GR);
- break;
+ /* some unknown zcull magic */
+ switch (nv_device(priv)->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(priv, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(priv, 0x402cc0, 0x00000000);
+ if (nv_device(priv)->chipset == 0xa0 ||
+ nv_device(priv)->chipset == 0xaa ||
+ nv_device(priv)->chipset == 0xac) {
+ nv_wr32(priv, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(priv, 0x402cc0, 0x00000000);
+ nv_wr32(priv, 0x402ca8, 0x00000002);
}
- }
- /* compute */
- NVOBJ_CLASS(dev, 0x50c0, GR);
- if (dev_priv->chipset > 0xa0 &&
- dev_priv->chipset != 0xaa &&
- dev_priv->chipset != 0xac)
- NVOBJ_CLASS(dev, 0x85c0, GR);
+ break;
+ }
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+ }
return 0;
}
+
+struct nouveau_oclass
+nv50_graph_oclass = {
+ .handle = NV_ENGINE(GR, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_graph_ctor,
+ .dtor = _nouveau_graph_dtor,
+ .init = nv50_graph_init,
+ .fini = _nouveau_graph_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
new file mode 100644
index 000000000000..0505fb419bde
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.h
@@ -0,0 +1,7 @@
+#ifndef __NV50_GRAPH_H__
+#define __NV50_GRAPH_H__
+
+int nv50_grctx_init(struct nouveau_device *, u32 *size);
+void nv50_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index f994d2f7e8d5..db8aefc3cf3e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,94 +22,92 @@
* Authors: Ben Skeggs
*/
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-
#include "nvc0.h"
#include "fuc/hubnvc0.fuc.h"
#include "fuc/gpcnvc0.fuc.h"
-static void
-nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
- NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
- nv_rd32(dev, base + 0x400));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
- nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
- nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-void
-nvc0_graph_ctxctl_debug(struct drm_device *dev)
-{
- u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
- u32 gpc;
-
- nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
- for (gpc = 0; gpc < gpcnr; gpc++)
- nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nvc0_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0x9039, &nouveau_object_ofuncs },
+ { 0x9097, &nouveau_object_ofuncs },
+ { 0x90c0, &nouveau_object_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nvc1_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0x9039, &nouveau_object_ofuncs },
+ { 0x9097, &nouveau_object_ofuncs },
+ { 0x90c0, &nouveau_object_ofuncs },
+ { 0x9197, &nouveau_object_ofuncs },
+ {}
+};
+
+static struct nouveau_oclass
+nvc8_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0x9039, &nouveau_object_ofuncs },
+ { 0x9097, &nouveau_object_ofuncs },
+ { 0x90c0, &nouveau_object_ofuncs },
+ { 0x9197, &nouveau_object_ofuncs },
+ { 0x9297, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
int
-nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
+nvc0_graph_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *args, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nvc0_graph_priv *priv = nv_engine(dev, engine);
+ struct nouveau_vm *vm = nouveau_client(parent)->vm;
+ struct nvc0_graph_priv *priv = (void *)engine;
struct nvc0_graph_data *data = priv->mmio_data;
struct nvc0_graph_mmio *mmio = priv->mmio_list;
- struct nvc0_graph_chan *grch;
- struct nouveau_gpuobj *grctx;
+ struct nvc0_graph_chan *chan;
int ret, i;
- grch = kzalloc(sizeof(*grch), GFP_KERNEL);
- if (!grch)
- return -ENOMEM;
- chan->engctx[NVOBJ_ENGINE_GR] = grch;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->size, 256, 0, &grch->grctx);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_map_vm(grch->grctx, chan->vm, NV_MEM_ACCESS_RW |
- NV_MEM_ACCESS_SYS, &grch->grctx_vma);
+ /* allocate memory for context, and fill with default values */
+ ret = nouveau_graph_context_create(parent, engine, oclass, NULL,
+ priv->size, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
if (ret)
return ret;
- grctx = grch->grctx;
-
/* allocate memory for a "mmio list" buffer that's used by the HUB
* fuc to modify some per-context register settings on first load
* of the context.
*/
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x100, 0, &grch->mmio);
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 0x100, 0, &chan->mmio);
if (ret)
return ret;
- ret = nouveau_gpuobj_map_vm(grch->mmio, chan->vm,
+ ret = nouveau_gpuobj_map_vm(nv_gpuobj(chan->mmio), vm,
NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
- &grch->mmio_vma);
+ &chan->mmio_vma);
if (ret)
return ret;
/* allocate buffers referenced by mmio list */
for (i = 0; data->size && i < ARRAY_SIZE(priv->mmio_data); i++) {
- ret = nouveau_gpuobj_new(dev, NULL, data->size, data->align,
- 0, &grch->data[i].mem);
+ ret = nouveau_gpuobj_new(parent, NULL, data->size, data->align,
+ 0, &chan->data[i].mem);
if (ret)
return ret;
- ret = nouveau_gpuobj_map_vm(grch->data[i].mem, chan->vm,
- data->access,
- &grch->data[i].vma);
+ ret = nouveau_gpuobj_map_vm(chan->data[i].mem, vm, data->access,
+ &chan->data[i].vma);
if (ret)
return ret;
@@ -122,117 +120,378 @@ nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
u32 data = mmio->data;
if (mmio->shift) {
- u64 info = grch->data[mmio->buffer].vma.offset;
+ u64 info = chan->data[mmio->buffer].vma.offset;
data |= info >> mmio->shift;
}
- nv_wo32(grch->mmio, grch->mmio_nr++ * 4, addr);
- nv_wo32(grch->mmio, grch->mmio_nr++ * 4, data);
+ nv_wo32(chan->mmio, chan->mmio_nr++ * 4, addr);
+ nv_wo32(chan->mmio, chan->mmio_nr++ * 4, data);
mmio++;
}
for (i = 0; i < priv->size; i += 4)
- nv_wo32(grch->grctx, i, priv->data[i / 4]);
-
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grch->grctx_vma.offset) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grch->grctx_vma.offset));
- nvimem_flush(dev);
+ nv_wo32(chan, i, priv->data[i / 4]);
if (!priv->firmware) {
- nv_wo32(grctx, 0x00, grch->mmio_nr / 2);
- nv_wo32(grctx, 0x04, grch->mmio_vma.offset >> 8);
+ nv_wo32(chan, 0x00, chan->mmio_nr / 2);
+ nv_wo32(chan, 0x04, chan->mmio_vma.offset >> 8);
} else {
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr / 2);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio_vma.offset));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio_vma.offset));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
+ nv_wo32(chan, 0xf4, 0);
+ nv_wo32(chan, 0xf8, 0);
+ nv_wo32(chan, 0x10, chan->mmio_nr / 2);
+ nv_wo32(chan, 0x14, lower_32_bits(chan->mmio_vma.offset));
+ nv_wo32(chan, 0x18, upper_32_bits(chan->mmio_vma.offset));
+ nv_wo32(chan, 0x1c, 1);
+ nv_wo32(chan, 0x20, 0);
+ nv_wo32(chan, 0x28, 0);
+ nv_wo32(chan, 0x2c, 0);
}
- nvimem_flush(dev);
- return 0;
-error:
- priv->base.context_del(chan, engine);
- return ret;
+ return 0;
}
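/* Layout note (editorial): the mmio list built above is a flat array of
 * (register, value) 32-bit pairs -- after two entries the buffer holds
 * { reg0, val0, reg1, val1 } and chan->mmio_nr == 4, which is why the count
 * written into the context image is chan->mmio_nr / 2.
 */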
void
-nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
+nvc0_graph_context_dtor(struct nouveau_object *object)
{
- struct nvc0_graph_chan *grch = chan->engctx[engine];
+ struct nvc0_graph_chan *chan = (void *)object;
int i;
- for (i = 0; i < ARRAY_SIZE(grch->data); i++) {
- nouveau_gpuobj_unmap(&grch->data[i].vma);
- nouveau_gpuobj_ref(NULL, &grch->data[i].mem);
+ for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
+ nouveau_gpuobj_unmap(&chan->data[i].vma);
+ nouveau_gpuobj_ref(NULL, &chan->data[i].mem);
}
- nouveau_gpuobj_unmap(&grch->mmio_vma);
- nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_unmap(&chan->mmio_vma);
+ nouveau_gpuobj_ref(NULL, &chan->mmio);
- nouveau_gpuobj_unmap(&grch->grctx_vma);
- nouveau_gpuobj_ref(NULL, &grch->grctx);
- chan->engctx[engine] = NULL;
+ nouveau_graph_context_destroy(&chan->base);
}
-static int
-nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+static struct nouveau_oclass
+nvc0_graph_cclass = {
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct nvc0_graph_priv *priv, u32 base)
{
- return 0;
+ nv_error(priv, "%06x - done 0x%08x\n", base,
+ nv_rd32(priv, base + 0x400));
+ nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(priv, base + 0x800), nv_rd32(priv, base + 0x804),
+ nv_rd32(priv, base + 0x808), nv_rd32(priv, base + 0x80c));
+ nv_error(priv, "%06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(priv, base + 0x810), nv_rd32(priv, base + 0x814),
+ nv_rd32(priv, base + 0x818), nv_rd32(priv, base + 0x81c));
+}
+
+void
+nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
+{
+ u32 gpcnr = nv_rd32(priv, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nvc0_graph_ctxctl_debug_unit(priv, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nvc0_graph_ctxctl_debug_unit(priv, 0x502000 + (gpc * 0x8000));
+}
+
+static void
+nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
+{
+ u32 ustat = nv_rd32(priv, 0x409c18);
+
+ if (ustat & 0x00000001)
+ nv_error(priv, "CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ nv_error(priv, "CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, ustat);
+}
+
+static void
+nvc0_graph_intr(struct nouveau_subdev *subdev)
+{
+ struct nvc0_graph_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u64 inst = (u64)(nv_rd32(priv, 0x409b00) & 0x0fffffff) << 12;
+ u32 stat = nv_rd32(priv, 0x400100);
+ u32 addr = nv_rd32(priv, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(priv, 0x400708);
+ u32 code = nv_rd32(priv, 0x400110);
+ u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ handle = nouveau_engctx_lookup_class(engine, inst, class);
+ if (!handle || nv_call(handle->object, mthd, data)) {
+ nv_error(priv, "ILLEGAL_MTHD ch 0x%010llx "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ }
+ nouveau_engctx_handle_put(handle);
+ nv_wr32(priv, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ nv_error(priv, "ILLEGAL_CLASS ch 0x%010llx subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ nv_wr32(priv, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ nv_error(priv, "DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch 0x%010llx subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ nv_wr32(priv, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ u32 trap = nv_rd32(priv, 0x400108);
+ nv_error(priv, "TRAP ch 0x%010llx status 0x%08x\n", inst, trap);
+ nv_wr32(priv, 0x400108, trap);
+ nv_wr32(priv, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nvc0_graph_ctxctl_isr(priv);
+ nv_wr32(priv, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ nv_error(priv, "unknown stat 0x%08x\n", stat);
+ nv_wr32(priv, 0x400100, stat);
+ }
+
+ nv_wr32(priv, 0x400500, 0x00010001);
+}
+
+int
+nvc0_graph_ctor_fw(struct nvc0_graph_priv *priv, const char *fwname,
+ struct nvc0_graph_fuc *fuc)
+{
+ struct nouveau_device *device = nv_device(priv);
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", device->chipset, fwname);
+ ret = request_firmware(&fw, f, &device->pdev->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, &device->pdev->dev);
+ if (ret) {
+ nv_error(priv, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
}
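/* Usage sketch (hypothetical chipset value): for a 0xc1 chipset and fwname
 * "fuc409c", request_firmware() is first tried with "nouveau/nvc1_fuc409c"
 * and, if that fails, with the chipset-agnostic "nouveau/fuc409c"; the blob
 * is then copied with kmemdup() so the struct firmware can be released
 * immediately.
 */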
static int
-nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nouveau_device *device = nv_device(parent);
+ struct nvc0_graph_priv *priv;
+ bool enable = true;
+ int ret, i;
+
+ switch (device->chipset) {
+ case 0xd9: /* known broken without binary driver firmware */
+ enable = false;
+ break;
+ default:
+ break;
+ }
+
+ ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x18001000;
+ nv_subdev(priv)->intr = nvc0_graph_intr;
+ nv_engine(priv)->cclass = &nvc0_graph_cclass;
+
+ if (nouveau_boolopt(device->cfgopt, "NvGrUseFW", false)) {
+ nv_info(priv, "using external firmware\n");
+ if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+ return -EINVAL;
+ priv->firmware = true;
+ }
+
+ switch (nvc0_graph_class(priv)) {
+ case 0x9097:
+ nv_engine(priv)->sclass = nvc0_graph_sclass;
+ break;
+ case 0x9197:
+ nv_engine(priv)->sclass = nvc1_graph_sclass;
+ break;
+ case 0x9297:
+ nv_engine(priv)->sclass = nvc8_graph_sclass;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+ priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
+ for (i = 0; i < priv->gpc_nr; i++) {
+ priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+ priv->tpc_total += priv->tpc_nr[i];
+ }
+
+ /*XXX: these need figuring out... though it might not even matter */
+ switch (nv_device(priv)->chipset) {
+ case 0xc0:
+ if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
+ priv->magic_not_rop_nr = 0x07;
+ } else
+ if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
+ priv->magic_not_rop_nr = 0x05;
+ } else
+ if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
+ priv->magic_not_rop_nr = 0x06;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc1: /* 2/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc8: /* 4/4/3/4, 5 */
+ priv->magic_not_rop_nr = 0x06;
+ break;
+ case 0xce: /* 4/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xcf: /* 4/0/0/0, 3 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xd9: /* 1/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ }
+
return 0;
}
static void
-nvc0_graph_init_obj418880(struct drm_device *dev)
+nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
+{
+ if (fuc->data) {
+ kfree(fuc->data);
+ fuc->data = NULL;
+ }
+}
+
+void
+nvc0_graph_dtor(struct nouveau_object *object)
+{
+ struct nvc0_graph_priv *priv = (void *)object;
+
+ if (priv->data)
+ kfree(priv->data);
+
+ nvc0_graph_dtor_fw(&priv->fuc409c);
+ nvc0_graph_dtor_fw(&priv->fuc409d);
+ nvc0_graph_dtor_fw(&priv->fuc41ac);
+ nvc0_graph_dtor_fw(&priv->fuc41ad);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ nouveau_graph_destroy(&priv->base);
+}
+
+static void
+nvc0_graph_init_obj418880(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int i;
- nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
for (i = 0; i < 4; i++)
- nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
- nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+ nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
-nvc0_graph_init_regs(struct drm_device *dev)
+nvc0_graph_init_regs(struct nvc0_graph_priv *priv)
{
- nv_wr32(dev, 0x400080, 0x003083c2);
- nv_wr32(dev, 0x400088, 0x00006fe7);
- nv_wr32(dev, 0x40008c, 0x00000000);
- nv_wr32(dev, 0x400090, 0x00000030);
- nv_wr32(dev, 0x40013c, 0x013901f7);
- nv_wr32(dev, 0x400140, 0x00000100);
- nv_wr32(dev, 0x400144, 0x00000000);
- nv_wr32(dev, 0x400148, 0x00000110);
- nv_wr32(dev, 0x400138, 0x00000000);
- nv_wr32(dev, 0x400130, 0x00000000);
- nv_wr32(dev, 0x400134, 0x00000000);
- nv_wr32(dev, 0x400124, 0x00000002);
+ nv_wr32(priv, 0x400080, 0x003083c2);
+ nv_wr32(priv, 0x400088, 0x00006fe7);
+ nv_wr32(priv, 0x40008c, 0x00000000);
+ nv_wr32(priv, 0x400090, 0x00000030);
+ nv_wr32(priv, 0x40013c, 0x013901f7);
+ nv_wr32(priv, 0x400140, 0x00000100);
+ nv_wr32(priv, 0x400144, 0x00000000);
+ nv_wr32(priv, 0x400148, 0x00000110);
+ nv_wr32(priv, 0x400138, 0x00000000);
+ nv_wr32(priv, 0x400130, 0x00000000);
+ nv_wr32(priv, 0x400134, 0x00000000);
+ nv_wr32(priv, 0x400124, 0x00000002);
}
static void
-nvc0_graph_init_gpc_0(struct drm_device *dev)
+nvc0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8];
- u8 tpnr[GPC_MAX];
+ u8 tpcnr[GPC_MAX];
int i, gpc, tpc;
- nv_wr32(dev, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+ nv_wr32(priv, TPC_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
/*
* TP ROP UNKVAL(magic_not_rop_nr)
@@ -244,205 +503,208 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
*/
memset(data, 0x00, sizeof(data));
- memcpy(tpnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
do {
gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpnr[gpc]--;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
data[i / 8] |= tpc << ((i % 8) * 4);
}
- nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
- nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
- nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
- nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+ nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tpc_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+ nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
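/* Worked example (editorial): for the 14-TPC configuration handled in
 * nvc0_graph_ctor() (the 3/3/4/4 layout), magicgpc918 =
 * DIV_ROUND_UP(0x00800000, 14) = 0x00092493.  The loop above packs one
 * per-GPC TPC index per nibble, eight nibbles per GPC_BCAST(0x0980)..0x098c
 * word, assigning TPCs to GPCs round-robin.
 */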
static void
-nvc0_graph_init_units(struct drm_device *dev)
+nvc0_graph_init_units(struct nvc0_graph_priv *priv)
{
- nv_wr32(dev, 0x409c24, 0x000f0000);
- nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
- nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
- nv_wr32(dev, 0x408030, 0xc0000000);
- nv_wr32(dev, 0x40601c, 0xc0000000);
- nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
- nv_wr32(dev, 0x406018, 0xc0000000);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x405844, 0x00ffffff);
- nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+ nv_wr32(priv, 0x409c24, 0x000f0000);
+ nv_wr32(priv, 0x404000, 0xc0000000); /* DISPATCH */
+ nv_wr32(priv, 0x404600, 0xc0000000); /* M2MF */
+ nv_wr32(priv, 0x408030, 0xc0000000);
+ nv_wr32(priv, 0x40601c, 0xc0000000);
+ nv_wr32(priv, 0x404490, 0xc0000000); /* MACRO */
+ nv_wr32(priv, 0x406018, 0xc0000000);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x405844, 0x00ffffff);
+ nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
}
static void
-nvc0_graph_init_gpc_1(struct drm_device *dev)
+nvc0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int gpc, tp;
+ int gpc, tpc;
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tp = 0; tp < priv->tpc_nr[gpc]; tp++) {
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x508), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x50c), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x224), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x48c), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x084), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x644), 0x001ffffe);
- nv_wr32(dev, TPC_UNIT(gpc, tp, 0x64c), 0x0000000f);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
}
static void
-nvc0_graph_init_rop(struct drm_device *dev)
+nvc0_graph_init_rop(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int rop;
for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
}
}
-static void
-nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
- struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
+void
+nvc0_graph_init_fw(struct nvc0_graph_priv *priv, u32 fuc_base,
+ struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
{
int i;
- nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ nv_wr32(priv, fuc_base + 0x01c0, 0x01000000);
for (i = 0; i < data->size / 4; i++)
- nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+ nv_wr32(priv, fuc_base + 0x01c4, data->data[i]);
- nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ nv_wr32(priv, fuc_base + 0x0180, 0x01000000);
for (i = 0; i < code->size / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(dev, fuc_base + 0x0188, i >> 6);
- nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ nv_wr32(priv, fuc_base + 0x0188, i >> 6);
+ nv_wr32(priv, fuc_base + 0x0184, code->data[i]);
}
}
static int
-nvc0_graph_init_ctxctl(struct drm_device *dev)
+nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
int i;
if (priv->firmware) {
/* load fuc microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c,
+ r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c,
&priv->fuc409d);
- nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac,
+ nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac,
&priv->fuc41ad);
- nv_wr32(dev, 0x000260, r000260);
+ nv_wr32(priv, 0x000260, r000260);
/* start both of them running */
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x41a10c, 0x00000000);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x41a100, 0x00000002);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
- NV_INFO(dev, "0x409800 wait failed\n");
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x7fffffff);
- nv_wr32(dev, 0x409504, 0x00000021);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000010);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x41a10c, 0x00000000);
+ nv_wr32(priv, 0x40910c, 0x00000000);
+ nv_wr32(priv, 0x41a100, 0x00000002);
+ nv_wr32(priv, 0x409100, 0x00000002);
+ if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+ nv_info(priv, "0x409800 wait failed\n");
+
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x7fffffff);
+ nv_wr32(priv, 0x409504, 0x00000021);
+
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000010);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x10 timeout\n");
return -EBUSY;
}
- priv->size = nv_rd32(dev, 0x409800);
+ priv->size = nv_rd32(priv, 0x409800);
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000016);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000016);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x16 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000025);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000025);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x25 timeout\n");
return -EBUSY;
}
- goto done;
+ if (priv->data == NULL) {
+ int ret = nvc0_grctx_generate(priv);
+ if (ret) {
+ nv_error(priv, "failed to construct context\n");
+ return ret;
+ }
+ }
+
+ return 0;
}
/* load HUB microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x4091c0, 0x01000000);
+ r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(priv, 0x4091c0, 0x01000000);
for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
- nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
+ nv_wr32(priv, 0x4091c4, nvc0_grhub_data[i]);
- nv_wr32(dev, 0x409180, 0x01000000);
+ nv_wr32(priv, 0x409180, 0x01000000);
for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x409188, i >> 6);
- nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
+ nv_wr32(priv, 0x409188, i >> 6);
+ nv_wr32(priv, 0x409184, nvc0_grhub_code[i]);
}
/* load GPC microcode */
- nv_wr32(dev, 0x41a1c0, 0x01000000);
+ nv_wr32(priv, 0x41a1c0, 0x01000000);
for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
- nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
+ nv_wr32(priv, 0x41a1c4, nvc0_grgpc_data[i]);
- nv_wr32(dev, 0x41a180, 0x01000000);
+ nv_wr32(priv, 0x41a180, 0x01000000);
for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x41a188, i >> 6);
- nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
+ nv_wr32(priv, 0x41a188, i >> 6);
+ nv_wr32(priv, 0x41a184, nvc0_grgpc_code[i]);
}
- nv_wr32(dev, 0x000260, r000260);
+ nv_wr32(priv, 0x000260, r000260);
/* start HUB ucode running, it'll init the GPCs */
- nv_wr32(dev, 0x409800, dev_priv->chipset);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
- nvc0_graph_ctxctl_debug(dev);
+ nv_wr32(priv, 0x409800, nv_device(priv)->chipset);
+ nv_wr32(priv, 0x40910c, 0x00000000);
+ nv_wr32(priv, 0x409100, 0x00000002);
+ if (!nv_wait(priv, 0x409800, 0x80000000, 0x80000000)) {
+ nv_error(priv, "HUB_INIT timed out\n");
+ nvc0_graph_ctxctl_debug(priv);
return -EBUSY;
}
- priv->size = nv_rd32(dev, 0x409804);
-done:
+ priv->size = nv_rd32(priv, 0x409804);
if (priv->data == NULL) {
- int ret = nvc0_grctx_generate(dev);
+ int ret = nvc0_grctx_generate(priv);
if (ret) {
- NV_ERROR(dev, "PGRAPH: failed to construct context\n");
+ nv_error(priv, "failed to construct context\n");
return ret;
}
@@ -453,37 +715,39 @@ done:
}
static int
-nvc0_graph_init(struct drm_device *dev, int engine)
+nvc0_graph_init(struct nouveau_object *object)
{
+ struct nvc0_graph_priv *priv = (void *)object;
int ret;
reset:
- nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
- nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
- nvc0_graph_init_obj418880(dev);
- nvc0_graph_init_regs(dev);
- /*nvc0_graph_init_unitplemented_magics(dev);*/
- nvc0_graph_init_gpc_0(dev);
- /*nvc0_graph_init_unitplemented_c242(dev);*/
-
- nv_wr32(dev, 0x400500, 0x00010001);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
-
- nvc0_graph_init_units(dev);
- nvc0_graph_init_gpc_1(dev);
- nvc0_graph_init_rop(dev);
-
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400118, 0xffffffff);
- nv_wr32(dev, 0x400130, 0xffffffff);
- nv_wr32(dev, 0x40011c, 0xffffffff);
- nv_wr32(dev, 0x400134, 0xffffffff);
- nv_wr32(dev, 0x400054, 0x34ce3464);
-
- ret = nvc0_graph_init_ctxctl(dev);
+ ret = nouveau_graph_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nvc0_graph_init_obj418880(priv);
+ nvc0_graph_init_regs(priv);
+ /*nvc0_graph_init_unitplemented_magics(priv);*/
+ nvc0_graph_init_gpc_0(priv);
+ /*nvc0_graph_init_unitplemented_c242(priv);*/
+
+ nv_wr32(priv, 0x400500, 0x00010001);
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
+
+ nvc0_graph_init_units(priv);
+ nvc0_graph_init_gpc_1(priv);
+ nvc0_graph_init_rop(priv);
+
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400118, 0xffffffff);
+ nv_wr32(priv, 0x400130, 0xffffffff);
+ nv_wr32(priv, 0x40011c, 0xffffffff);
+ nv_wr32(priv, 0x400134, 0xffffffff);
+ nv_wr32(priv, 0x400054, 0x34ce3464);
+
+ ret = nvc0_graph_init_ctxctl(priv);
if (ret) {
if (ret == 1)
goto reset;
@@ -493,279 +757,13 @@ reset:
return 0;
}
-int
-nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->addr)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nvc0_graph_ctxctl_isr(struct drm_device *dev)
-{
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- if (ustat & 0x00000001)
- NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
- if (ustat & 0x00080000)
- NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
- if (ustat & ~0x00080001)
- NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
- nvc0_graph_ctxctl_debug(dev);
- nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nvc0_graph_isr(struct drm_device *dev)
-{
- u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
- u32 chid = nvc0_graph_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x400100);
- u32 addr = nv_rd32(dev, 0x400704);
- u32 mthd = (addr & 0x00003ffc);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 data = nv_rd32(dev, 0x400708);
- u32 code = nv_rd32(dev, 0x400110);
- u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
- if (stat & 0x00000010) {
- if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- nv_wr32(dev, 0x400100, 0x00000010);
- stat &= ~0x00000010;
- }
-
- if (stat & 0x00000020) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00000020);
- stat &= ~0x00000020;
- }
-
- if (stat & 0x00100000) {
- NV_INFO(dev, "PGRAPH: DATA_ERROR [");
- nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00100000);
- stat &= ~0x00100000;
- }
-
- if (stat & 0x00200000) {
- u32 trap = nv_rd32(dev, 0x400108);
- NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
- nv_wr32(dev, 0x400108, trap);
- nv_wr32(dev, 0x400100, 0x00200000);
- stat &= ~0x00200000;
- }
-
- if (stat & 0x00080000) {
- nvc0_graph_ctxctl_isr(dev);
- nv_wr32(dev, 0x400100, 0x00080000);
- stat &= ~0x00080000;
- }
-
- if (stat) {
- NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
- nv_wr32(dev, 0x400100, stat);
- }
-
- nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
- struct nvc0_graph_fuc *fuc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- const struct firmware *fw;
- char f[32];
- int ret;
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
- if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "failed to load %s\n", fwname);
- return ret;
- }
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
-{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
-}
-
-static void
-nvc0_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, engine);
-
- nvc0_graph_destroy_fw(&priv->fuc409c);
- nvc0_graph_destroy_fw(&priv->fuc409d);
- nvc0_graph_destroy_fw(&priv->fuc41ac);
- nvc0_graph_destroy_fw(&priv->fuc41ad);
-
- nouveau_irq_unregister(dev, 12);
-
- nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
- if (priv->data)
- kfree(priv->data);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(priv);
-}
-
-int
-nvc0_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_graph_priv *priv;
- int ret, gpc, i;
- u32 fermi;
-
- fermi = nvc0_graph_class(dev);
- if (!fermi) {
- NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
- return 0;
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.destroy = nvc0_graph_destroy;
- priv->base.init = nvc0_graph_init;
- priv->base.fini = nvc0_graph_fini;
- priv->base.context_new = nvc0_graph_context_new;
- priv->base.context_del = nvc0_graph_context_del;
- priv->base.object_new = nvc0_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
- nouveau_irq_register(dev, 12, nvc0_graph_isr);
-
- if (nouveau_ctxfw) {
- NV_INFO(dev, "PGRAPH: using external firmware\n");
- if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
- }
- priv->firmware = true;
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
- if (ret)
- goto error;
-
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(priv->unk4188b4, i, 0x00000010);
- nv_wo32(priv->unk4188b8, i, 0x00000010);
- }
-
- priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
- priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
- priv->tpc_total += priv->tpc_nr[gpc];
- }
-
- /*XXX: these need figuring out... */
- switch (dev_priv->chipset) {
- case 0xc0:
- if (priv->tpc_total == 11) { /* 465, 3/4/4/0, 4 */
- priv->magic_not_rop_nr = 0x07;
- } else
- if (priv->tpc_total == 14) { /* 470, 3/3/4/4, 5 */
- priv->magic_not_rop_nr = 0x05;
- } else
- if (priv->tpc_total == 15) { /* 480, 3/4/4/4, 6 */
- priv->magic_not_rop_nr = 0x06;
- }
- break;
- case 0xc3: /* 450, 4/0/0/0, 2 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xc4: /* 460, 3/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc1: /* 2/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc8: /* 4/4/3/4, 5 */
- priv->magic_not_rop_nr = 0x06;
- break;
- case 0xce: /* 4/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xcf: /* 4/0/0/0, 3 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xd9: /* 1/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- }
-
- if (!priv->magic_not_rop_nr) {
- NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
- priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
- priv->tpc_nr[3], priv->rop_nr);
- priv->magic_not_rop_nr = 0x00;
- }
-
- NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
- NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
- NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
- if (fermi >= 0x9197)
- NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
- if (fermi >= 0x9297)
- NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
- NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
- return 0;
-
-error:
- nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
- return ret;
-}
+struct nouveau_oclass
+nvc0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nvc0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
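The table above replaces the old nvc0_graph_create()/nvc0_graph_destroy() entry points: a static nouveau_oclass naming the engine handle plus an ofuncs set (ctor/dtor/init/fini) for the core to call. A minimal sketch of that descriptor pattern, using invented demo_* types rather than the real nouveau definitions:

#include <stdio.h>

struct demo_ofuncs {
	int  (*ctor)(void **pobject);
	void (*dtor)(void *object);
	int  (*init)(void *object);
	int  (*fini)(void *object, int suspend);
};

struct demo_oclass {
	unsigned int handle;			/* engine + revision tag */
	const struct demo_ofuncs *ofuncs;
};

static int demo_ctor(void **pobject)
{
	*pobject = "gr";			/* a real ctor would allocate state */
	return 0;
}

static void demo_dtor(void *object) { (void)object; }

static int demo_init(void *object)
{
	printf("init %s\n", (const char *)object);
	return 0;
}

static int demo_fini(void *object, int suspend)
{
	(void)object; (void)suspend;
	return 0;
}

static const struct demo_ofuncs demo_graph_ofuncs = {
	.ctor = demo_ctor,
	.dtor = demo_dtor,
	.init = demo_init,
	.fini = demo_fini,
};

static const struct demo_oclass demo_graph_oclass = {
	.handle = 0xc0,
	.ofuncs = &demo_graph_ofuncs,
};

int main(void)
{
	void *obj;

	/* the core drives objects purely through the descriptor */
	if (demo_graph_oclass.ofuncs->ctor(&obj) == 0) {
		demo_graph_oclass.ofuncs->init(obj);
		demo_graph_oclass.ofuncs->fini(obj, 0);
		demo_graph_oclass.ofuncs->dtor(obj);
	}
	return 0;
}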
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 30ea3ab135c6..26f8268cc8c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -25,6 +25,18 @@
#ifndef __NVC0_GRAPH_H__
#define __NVC0_GRAPH_H__
+#include <core/client.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+#include <core/option.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/graph.h>
+
#define GPC_MAX 4
#define TPC_MAX 32
@@ -53,7 +65,7 @@ struct nvc0_graph_fuc {
};
struct nvc0_graph_priv {
- struct nouveau_exec_engine base;
+ struct nouveau_graph base;
struct nvc0_graph_fuc fuc409c;
struct nvc0_graph_fuc fuc409d;
@@ -78,11 +90,10 @@ struct nvc0_graph_priv {
};
struct nvc0_graph_chan {
- struct nouveau_gpuobj *grctx;
- struct nouveau_vma grctx_vma;
+ struct nouveau_graph_chan base;
struct nouveau_gpuobj *mmio;
- struct nouveau_vma mmio_vma;
+ struct nouveau_vma mmio_vma;
int mmio_nr;
struct {
struct nouveau_gpuobj *mem;
@@ -91,11 +102,11 @@ struct nvc0_graph_chan {
};
static inline u32
-nvc0_graph_class(struct drm_device *priv)
+nvc0_graph_class(void *obj)
{
- struct drm_nouveau_private *dev_priv = priv->dev_private;
+ struct nouveau_device *device = nv_device(obj);
- switch (dev_priv->chipset) {
+ switch (device->chipset) {
case 0xc0:
case 0xc3:
case 0xc4:
@@ -115,17 +126,16 @@ nvc0_graph_class(struct drm_device *priv)
}
}
-void nv_icmd(struct drm_device *priv, u32 icmd, u32 data);
+void nv_icmd(struct nvc0_graph_priv *priv, u32 icmd, u32 data);
static inline void
-nv_mthd(struct drm_device *priv, u32 class, u32 mthd, u32 data)
+nv_mthd(struct nvc0_graph_priv *priv, u32 class, u32 mthd, u32 data)
{
nv_wr32(priv, 0x40448c, data);
nv_wr32(priv, 0x404488, 0x80000000 | (mthd << 14) | class);
}
struct nvc0_grctx {
- struct drm_device *dev;
struct nvc0_graph_priv *priv;
struct nvc0_graph_data *data;
struct nvc0_graph_mmio *mmio;
@@ -135,18 +145,18 @@ struct nvc0_grctx {
u64 addr;
};
-int nvc0_grctx_generate(struct drm_device *);
-int nvc0_grctx_init(struct drm_device *, struct nvc0_graph_priv *,
- struct nvc0_grctx *);
+int nvc0_grctx_generate(struct nvc0_graph_priv *);
+int nvc0_grctx_init(struct nvc0_graph_priv *, struct nvc0_grctx *);
void nvc0_grctx_data(struct nvc0_grctx *, u32, u32, u32);
void nvc0_grctx_mmio(struct nvc0_grctx *, u32, u32, u32, u32);
int nvc0_grctx_fini(struct nvc0_grctx *);
-int nve0_grctx_generate(struct drm_device *);
+int nve0_grctx_generate(struct nvc0_graph_priv *);
#define mmio_data(s,a,p) nvc0_grctx_data(&info, (s), (a), (p))
#define mmio_list(r,d,s,b) nvc0_grctx_mmio(&info, (r), (d), (s), (b))
+void nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *);
int nvc0_graph_ctor_fw(struct nvc0_graph_priv *, const char *,
struct nvc0_graph_fuc *);
void nvc0_graph_dtor(struct nouveau_object *);
@@ -157,9 +167,4 @@ int nvc0_graph_context_ctor(struct nouveau_object *, struct nouveau_object *,
struct nouveau_object **);
void nvc0_graph_context_dtor(struct nouveau_object *);
-void nvc0_graph_ctxctl_debug(struct drm_device *);
-
-int nvc0_graph_context_new(struct nouveau_channel *, int);
-void nvc0_graph_context_del(struct nouveau_channel *, int);
-
#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index a3a4ee7c0b2e..c79748a6fa2b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,116 +22,290 @@
* Authors: Ben Skeggs
*/
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-
#include "nvc0.h"
+/*******************************************************************************
+ * Graphics object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_sclass[] = {
+ { 0x902d, &nouveau_object_ofuncs },
+ { 0xa040, &nouveau_object_ofuncs },
+ { 0xa097, &nouveau_object_ofuncs },
+ { 0xa0c0, &nouveau_object_ofuncs },
+ { 0xa0b5, &nouveau_object_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PGRAPH context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nve0_graph_cclass = {
+ .handle = NV_ENGCTX(GR, 0xe0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_graph_context_ctor,
+ .dtor = nvc0_graph_context_dtor,
+ .init = _nouveau_graph_context_init,
+ .fini = _nouveau_graph_context_fini,
+ .rd32 = _nouveau_graph_context_rd32,
+ .wr32 = _nouveau_graph_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PGRAPH engine/subdev functions
+ ******************************************************************************/
+
static void
-nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
{
- NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
- nv_rd32(dev, base + 0x400));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
- nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
- nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+ u32 ustat = nv_rd32(priv, 0x409c18);
+
+ if (ustat & 0x00000001)
+ nv_error(priv, "CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ nv_error(priv, "CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ nv_error(priv, "CTXCTRL 0x%08x\n", ustat);
+
+ nvc0_graph_ctxctl_debug(priv);
+ nv_wr32(priv, 0x409c20, ustat);
}
static void
-nve0_graph_ctxctl_debug(struct drm_device *dev)
+nve0_graph_trap_isr(struct nvc0_graph_priv *priv, u64 inst)
{
- u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
- u32 gpc;
+ u32 trap = nv_rd32(priv, 0x400108);
+ int rop;
+
+ if (trap & 0x00000001) {
+ u32 stat = nv_rd32(priv, 0x404000);
+ nv_error(priv, "DISPATCH ch 0x%010llx 0x%08x\n", inst, stat);
+ nv_wr32(priv, 0x404000, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x00000001);
+ trap &= ~0x00000001;
+ }
- nve0_graph_ctxctl_debug_unit(dev, 0x409000);
- for (gpc = 0; gpc < gpcnr; gpc++)
- nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+ if (trap & 0x00000010) {
+ u32 stat = nv_rd32(priv, 0x405840);
+ nv_error(priv, "SHADER ch 0x%010llx 0x%08x\n", inst, stat);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x400108, 0x00000010);
+ trap &= ~0x00000010;
+ }
+
+ if (trap & 0x02000000) {
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
+ u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
+ nv_error(priv, "ROP%d ch 0x%010llx 0x%08x 0x%08x\n",
+ rop, inst, statz, statc);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+ }
+ nv_wr32(priv, 0x400108, 0x02000000);
+ trap &= ~0x02000000;
+ }
+
+ if (trap) {
+ nv_error(priv, "TRAP ch 0x%010llx 0x%08x\n", inst, trap);
+ nv_wr32(priv, 0x400108, trap);
+ }
}
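nve0_graph_trap_isr() above uses the usual peel-off idiom for status registers: test each known bit, report and acknowledge it, clear it from a local copy, and treat whatever is left as unknown. A compact sketch of that idiom, with the register acknowledgement elided and the bit values copied purely for illustration:

#include <stdio.h>

/* mirrors the shape of nve0_graph_trap_isr() above: each known bit is
 * reported and removed from the local copy; anything left over is
 * flagged as unknown */
static void dispatch_trap(unsigned int trap)
{
	if (trap & 0x00000001) {
		printf("DISPATCH trap\n");
		trap &= ~0x00000001u;
	}
	if (trap & 0x00000010) {
		printf("SHADER trap\n");
		trap &= ~0x00000010u;
	}
	if (trap & 0x02000000) {
		printf("ROP trap\n");
		trap &= ~0x02000000u;
	}
	if (trap)
		printf("unknown trap bits 0x%08x\n", trap);
}

int main(void)
{
	dispatch_trap(0x02000011);	/* DISPATCH + SHADER + ROP */
	dispatch_trap(0x00000100);	/* not handled above -> reported as unknown */
	return 0;
}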
-static int
-nve0_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+static void
+nve0_graph_intr(struct nouveau_subdev *subdev)
{
- return 0;
+ struct nvc0_graph_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u64 inst = (u64)(nv_rd32(priv, 0x409b00) & 0x0fffffff) << 12;
+ u32 stat = nv_rd32(priv, 0x400100);
+ u32 addr = nv_rd32(priv, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(priv, 0x400708);
+ u32 code = nv_rd32(priv, 0x400110);
+ u32 class = nv_rd32(priv, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ handle = nouveau_engctx_lookup_class(engine, inst, class);
+ if (!handle || nv_call(handle->object, mthd, data)) {
+ nv_error(priv, "ILLEGAL_MTHD ch 0x%010llx "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ }
+ nouveau_engctx_handle_put(handle);
+ nv_wr32(priv, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ nv_error(priv, "ILLEGAL_CLASS ch 0x%010llx subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ nv_wr32(priv, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ nv_error(priv, "DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch 0x%010llx subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ inst, subc, class, mthd, data);
+ nv_wr32(priv, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ nve0_graph_trap_isr(priv, inst);
+ nv_wr32(priv, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nve0_graph_ctxctl_isr(priv);
+ nv_wr32(priv, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ nv_error(priv, "unknown stat 0x%08x\n", stat);
+ nv_wr32(priv, 0x400100, stat);
+ }
+
+ nv_wr32(priv, 0x400500, 0x00010001);
}
static int
-nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
+ struct nvc0_graph_priv *priv;
+ int ret, i;
+
+ ret = nouveau_graph_create(parent, engine, oclass, false, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x18001000;
+ nv_subdev(priv)->intr = nve0_graph_intr;
+ nv_engine(priv)->cclass = &nve0_graph_cclass;
+ nv_engine(priv)->sclass = nve0_graph_sclass;
+
+ nv_info(priv, "using external firmware\n");
+ if (nvc0_graph_ctor_fw(priv, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
+ return -EINVAL;
+ priv->firmware = true;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(parent, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(priv, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(priv, 0x409604) & 0x001f0000) >> 16;
+ for (i = 0; i < priv->gpc_nr; i++) {
+ priv->tpc_nr[i] = nv_rd32(priv, GPC_UNIT(i, 0x2608));
+ priv->tpc_total += priv->tpc_nr[i];
+ }
+
+ switch (nv_device(priv)->chipset) {
+ case 0xe4:
+ if (priv->tpc_total == 8)
+ priv->magic_not_rop_nr = 3;
+ else
+ if (priv->tpc_total == 7)
+ priv->magic_not_rop_nr = 1;
+ break;
+ case 0xe7:
+ priv->magic_not_rop_nr = 1;
+ break;
+ default:
+ break;
+ }
+
return 0;
}
static void
-nve0_graph_init_obj418880(struct drm_device *dev)
+nve0_graph_init_obj418880(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int i;
- nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x08a4), 0x00000000);
for (i = 0; i < 4; i++)
- nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
- nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
+ nv_wr32(priv, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(priv, GPC_BCAST(0x08b4), priv->unk4188b4->addr >> 8);
+ nv_wr32(priv, GPC_BCAST(0x08b8), priv->unk4188b8->addr >> 8);
}
static void
-nve0_graph_init_regs(struct drm_device *dev)
+nve0_graph_init_regs(struct nvc0_graph_priv *priv)
{
- nv_wr32(dev, 0x400080, 0x003083c2);
- nv_wr32(dev, 0x400088, 0x0001ffe7);
- nv_wr32(dev, 0x40008c, 0x00000000);
- nv_wr32(dev, 0x400090, 0x00000030);
- nv_wr32(dev, 0x40013c, 0x003901f7);
- nv_wr32(dev, 0x400140, 0x00000100);
- nv_wr32(dev, 0x400144, 0x00000000);
- nv_wr32(dev, 0x400148, 0x00000110);
- nv_wr32(dev, 0x400138, 0x00000000);
- nv_wr32(dev, 0x400130, 0x00000000);
- nv_wr32(dev, 0x400134, 0x00000000);
- nv_wr32(dev, 0x400124, 0x00000002);
+ nv_wr32(priv, 0x400080, 0x003083c2);
+ nv_wr32(priv, 0x400088, 0x0001ffe7);
+ nv_wr32(priv, 0x40008c, 0x00000000);
+ nv_wr32(priv, 0x400090, 0x00000030);
+ nv_wr32(priv, 0x40013c, 0x003901f7);
+ nv_wr32(priv, 0x400140, 0x00000100);
+ nv_wr32(priv, 0x400144, 0x00000000);
+ nv_wr32(priv, 0x400148, 0x00000110);
+ nv_wr32(priv, 0x400138, 0x00000000);
+ nv_wr32(priv, 0x400130, 0x00000000);
+ nv_wr32(priv, 0x400134, 0x00000000);
+ nv_wr32(priv, 0x400124, 0x00000002);
}
static void
-nve0_graph_init_units(struct drm_device *dev)
+nve0_graph_init_units(struct nvc0_graph_priv *priv)
{
- nv_wr32(dev, 0x409ffc, 0x00000000);
- nv_wr32(dev, 0x409c14, 0x00003e3e);
- nv_wr32(dev, 0x409c24, 0x000f0000);
-
- nv_wr32(dev, 0x404000, 0xc0000000);
- nv_wr32(dev, 0x404600, 0xc0000000);
- nv_wr32(dev, 0x408030, 0xc0000000);
- nv_wr32(dev, 0x404490, 0xc0000000);
- nv_wr32(dev, 0x406018, 0xc0000000);
- nv_wr32(dev, 0x407020, 0xc0000000);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x405844, 0x00ffffff);
-
- nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+ nv_wr32(priv, 0x409ffc, 0x00000000);
+ nv_wr32(priv, 0x409c14, 0x00003e3e);
+ nv_wr32(priv, 0x409c24, 0x000f0000);
+
+ nv_wr32(priv, 0x404000, 0xc0000000);
+ nv_wr32(priv, 0x404600, 0xc0000000);
+ nv_wr32(priv, 0x408030, 0xc0000000);
+ nv_wr32(priv, 0x404490, 0xc0000000);
+ nv_wr32(priv, 0x406018, 0xc0000000);
+ nv_wr32(priv, 0x407020, 0xc0000000);
+ nv_wr32(priv, 0x405840, 0xc0000000);
+ nv_wr32(priv, 0x405844, 0x00ffffff);
+
+ nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(priv, 0x419eb4, 0x00001000, 0x00001000);
}
static void
-nve0_graph_init_gpc_0(struct drm_device *dev)
+nve0_graph_init_gpc_0(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
u32 data[TPC_MAX / 8];
u8 tpcnr[GPC_MAX];
int i, gpc, tpc;
- nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
+ nv_wr32(priv, GPC_UNIT(0, 0x3018), 0x00000001);
memset(data, 0x00, sizeof(data));
memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
@@ -144,164 +318,143 @@ nve0_graph_init_gpc_0(struct drm_device *dev)
data[i / 8] |= tpc << ((i % 8) * 4);
}
- nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
- nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
- nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
- nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+ nv_wr32(priv, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(priv, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(priv, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(priv, GPC_BCAST(0x098c), data[3]);
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
priv->tpc_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0918), magicgpc918);
}
- nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+ nv_wr32(priv, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(priv, GPC_BCAST(0x08ac), nv_rd32(priv, 0x100800));
}
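The magicgpc918 value written to GPC_UNIT(gpc, 0x0918) and GPC_BCAST(0x1bd4) above is a ceiling division: 0x00800000 over the total TPC count, computed with the kernel's DIV_ROUND_UP(). A small sketch of just that computation, with sample TPC totals chosen only for illustration:

#include <stdio.h>

/* DIV_ROUND_UP is the kernel's ceiling-division helper */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* sample TPC totals chosen purely for illustration */
	static const unsigned int tpc_totals[] = { 7, 8, 14, 15 };
	unsigned int i;

	for (i = 0; i < sizeof(tpc_totals) / sizeof(tpc_totals[0]); i++) {
		unsigned int magicgpc918 =
			DIV_ROUND_UP(0x00800000u, tpc_totals[i]);
		printf("tpc_total=%2u -> magicgpc918=0x%08x\n",
		       tpc_totals[i], magicgpc918);
	}
	return 0;
}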
static void
-nve0_graph_init_gpc_1(struct drm_device *dev)
+nve0_graph_init_gpc_1(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int gpc, tpc;
for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x0824), 0xc0000000);
for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(priv, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
}
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(priv, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
}
}
static void
-nve0_graph_init_rop(struct drm_device *dev)
+nve0_graph_init_rop(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
int rop;
for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
- }
-}
-
-static void
-nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
- struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
-{
- int i;
-
- nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
- for (i = 0; i < data->size / 4; i++)
- nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
-
- nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
- for (i = 0; i < code->size / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, fuc_base + 0x0188, i >> 6);
- nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(priv, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(priv, ROP_UNIT(rop, 0x208), 0xffffffff);
}
}
static int
-nve0_graph_init_ctxctl(struct drm_device *dev)
+nve0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
u32 r000260;
/* load fuc microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
- nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
- nv_wr32(dev, 0x000260, r000260);
+ r000260 = nv_mask(priv, 0x000260, 0x00000001, 0x00000000);
+ nvc0_graph_init_fw(priv, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nvc0_graph_init_fw(priv, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(priv, 0x000260, r000260);
/* start both of them running */
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x41a10c, 0x00000000);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x41a100, 0x00000002);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
- NV_INFO(dev, "0x409800 wait failed\n");
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x7fffffff);
- nv_wr32(dev, 0x409504, 0x00000021);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000010);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x41a10c, 0x00000000);
+ nv_wr32(priv, 0x40910c, 0x00000000);
+ nv_wr32(priv, 0x41a100, 0x00000002);
+ nv_wr32(priv, 0x409100, 0x00000002);
+ if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
+ nv_error(priv, "0x409800 wait failed\n");
+
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x7fffffff);
+ nv_wr32(priv, 0x409504, 0x00000021);
+
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000010);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x10 timeout\n");
return -EBUSY;
}
- priv->size = nv_rd32(dev, 0x409800);
+ priv->size = nv_rd32(priv, 0x409800);
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000016);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000016);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x16 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000025);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ nv_wr32(priv, 0x409840, 0xffffffff);
+ nv_wr32(priv, 0x409500, 0x00000000);
+ nv_wr32(priv, 0x409504, 0x00000025);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x25 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000030);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
+ nv_wr32(priv, 0x409800, 0x00000000);
+ nv_wr32(priv, 0x409500, 0x00000001);
+ nv_wr32(priv, 0x409504, 0x00000030);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x30 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409810, 0xb00095c8);
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000031);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
+ nv_wr32(priv, 0x409810, 0xb00095c8);
+ nv_wr32(priv, 0x409800, 0x00000000);
+ nv_wr32(priv, 0x409500, 0x00000001);
+ nv_wr32(priv, 0x409504, 0x00000031);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x31 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409810, 0x00080420);
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000032);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
+ nv_wr32(priv, 0x409810, 0x00080420);
+ nv_wr32(priv, 0x409800, 0x00000000);
+ nv_wr32(priv, 0x409500, 0x00000001);
+ nv_wr32(priv, 0x409504, 0x00000032);
+ if (!nv_wait_ne(priv, 0x409800, 0xffffffff, 0x00000000)) {
+ nv_error(priv, "fuc09 req 0x32 timeout\n");
return -EBUSY;
}
- nv_wr32(dev, 0x409614, 0x00000070);
- nv_wr32(dev, 0x409614, 0x00000770);
- nv_wr32(dev, 0x40802c, 0x00000001);
+ nv_wr32(priv, 0x409614, 0x00000070);
+ nv_wr32(priv, 0x409614, 0x00000770);
+ nv_wr32(priv, 0x40802c, 0x00000001);
if (priv->data == NULL) {
- int ret = nve0_grctx_generate(dev);
+ int ret = nve0_grctx_generate(priv);
if (ret) {
- NV_ERROR(dev, "PGRAPH: failed to construct context\n");
+ nv_error(priv, "failed to construct context\n");
return ret;
}
@@ -312,325 +465,53 @@ nve0_graph_init_ctxctl(struct drm_device *dev)
}
static int
-nve0_graph_init(struct drm_device *dev, int engine)
+nve0_graph_init(struct nouveau_object *object)
{
+ struct nvc0_graph_priv *priv = (void *)object;
int ret;
reset:
- nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
- nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
- nve0_graph_init_obj418880(dev);
- nve0_graph_init_regs(dev);
- nve0_graph_init_gpc_0(dev);
-
- nv_wr32(dev, 0x400500, 0x00010001);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
-
- nve0_graph_init_units(dev);
- nve0_graph_init_gpc_1(dev);
- nve0_graph_init_rop(dev);
-
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400118, 0xffffffff);
- nv_wr32(dev, 0x400130, 0xffffffff);
- nv_wr32(dev, 0x40011c, 0xffffffff);
- nv_wr32(dev, 0x400134, 0xffffffff);
- nv_wr32(dev, 0x400054, 0x34ce3464);
-
- ret = nve0_graph_init_ctxctl(dev);
- if (ret) {
- if (ret == 1)
- goto reset;
- return ret;
- }
-
- return 0;
-}
-
-int
-nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->addr)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nve0_graph_ctxctl_isr(struct drm_device *dev)
-{
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- if (ustat & 0x00000001)
- NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
- if (ustat & 0x00080000)
- NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
- if (ustat & ~0x00080001)
- NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
- nve0_graph_ctxctl_debug(dev);
- nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nve0_graph_trap_isr(struct drm_device *dev, int chid)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- u32 trap = nv_rd32(dev, 0x400108);
- int rop;
-
- if (trap & 0x00000001) {
- u32 stat = nv_rd32(dev, 0x404000);
- NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
- nv_wr32(dev, 0x404000, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x00000001);
- trap &= ~0x00000001;
- }
-
- if (trap & 0x00000010) {
- u32 stat = nv_rd32(dev, 0x405840);
- NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x00000010);
- trap &= ~0x00000010;
- }
-
- if (trap & 0x02000000) {
- for (rop = 0; rop < priv->rop_nr; rop++) {
- u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
- u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
- NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
- rop, chid, statz, statc);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- }
- nv_wr32(dev, 0x400108, 0x02000000);
- trap &= ~0x02000000;
- }
-
- if (trap) {
- NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
- nv_wr32(dev, 0x400108, trap);
- }
-}
-
-static void
-nve0_graph_isr(struct drm_device *dev)
-{
- u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
- u32 chid = nve0_graph_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x400100);
- u32 addr = nv_rd32(dev, 0x400704);
- u32 mthd = (addr & 0x00003ffc);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 data = nv_rd32(dev, 0x400708);
- u32 code = nv_rd32(dev, 0x400110);
- u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
- if (stat & 0x00000010) {
- if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- nv_wr32(dev, 0x400100, 0x00000010);
- stat &= ~0x00000010;
- }
-
- if (stat & 0x00000020) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00000020);
- stat &= ~0x00000020;
- }
-
- if (stat & 0x00100000) {
- NV_INFO(dev, "PGRAPH: DATA_ERROR [");
- nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00100000);
- stat &= ~0x00100000;
- }
-
- if (stat & 0x00200000) {
- nve0_graph_trap_isr(dev, chid);
- nv_wr32(dev, 0x400100, 0x00200000);
- stat &= ~0x00200000;
- }
-
- if (stat & 0x00080000) {
- nve0_graph_ctxctl_isr(dev);
- nv_wr32(dev, 0x400100, 0x00080000);
- stat &= ~0x00080000;
- }
-
- if (stat) {
- NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
- nv_wr32(dev, 0x400100, stat);
- }
-
- nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
- struct nvc0_graph_fuc *fuc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- const struct firmware *fw;
- char f[32];
- int ret;
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
+ ret = nouveau_graph_init(&priv->base);
if (ret)
return ret;
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nve0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
-{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
-}
-
-static void
-nve0_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, engine);
-
- nve0_graph_destroy_fw(&priv->fuc409c);
- nve0_graph_destroy_fw(&priv->fuc409d);
- nve0_graph_destroy_fw(&priv->fuc41ac);
- nve0_graph_destroy_fw(&priv->fuc41ad);
-
- nouveau_irq_unregister(dev, 12);
-
- nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
- if (priv->data)
- kfree(priv->data);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(priv);
-}
-
-int
-nve0_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_graph_priv *priv;
- int ret, gpc, i;
- u32 kepler;
-
- kepler = nvc0_graph_class(dev);
- if (!kepler) {
- NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
- return 0;
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.destroy = nve0_graph_destroy;
- priv->base.init = nve0_graph_init;
- priv->base.fini = nve0_graph_fini;
- priv->base.context_new = nvc0_graph_context_new;
- priv->base.context_del = nvc0_graph_context_del;
- priv->base.object_new = nve0_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
- nouveau_irq_register(dev, 12, nve0_graph_isr);
-
- NV_INFO(dev, "PGRAPH: using external firmware\n");
- if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
- }
- priv->firmware = true;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
- if (ret)
- goto error;
+ nve0_graph_init_obj418880(priv);
+ nve0_graph_init_regs(priv);
+ nve0_graph_init_gpc_0(priv);
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
- if (ret)
- goto error;
+ nv_wr32(priv, 0x400500, 0x00010001);
+ nv_wr32(priv, 0x400100, 0xffffffff);
+ nv_wr32(priv, 0x40013c, 0xffffffff);
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(priv->unk4188b4, i, 0x00000010);
- nv_wo32(priv->unk4188b8, i, 0x00000010);
- }
+ nve0_graph_init_units(priv);
+ nve0_graph_init_gpc_1(priv);
+ nve0_graph_init_rop(priv);
- priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
- priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
- priv->tpc_total += priv->tpc_nr[gpc];
- }
-
- switch (dev_priv->chipset) {
- case 0xe4:
- if (priv->tpc_total == 8)
- priv->magic_not_rop_nr = 3;
- else
- if (priv->tpc_total == 7)
- priv->magic_not_rop_nr = 1;
- break;
- case 0xe7:
- priv->magic_not_rop_nr = 1;
- break;
- default:
- break;
- }
+ nv_wr32(priv, 0x400108, 0xffffffff);
+ nv_wr32(priv, 0x400138, 0xffffffff);
+ nv_wr32(priv, 0x400118, 0xffffffff);
+ nv_wr32(priv, 0x400130, 0xffffffff);
+ nv_wr32(priv, 0x40011c, 0xffffffff);
+ nv_wr32(priv, 0x400134, 0xffffffff);
+ nv_wr32(priv, 0x400054, 0x34ce3464);
- if (!priv->magic_not_rop_nr) {
- NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
- priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
- priv->tpc_nr[3], priv->rop_nr);
- priv->magic_not_rop_nr = 0x00;
+ ret = nve0_graph_init_ctxctl(priv);
+ if (ret) {
+ if (ret == 1)
+ goto reset;
+ return ret;
}
- NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
- NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
- NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
- NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
- NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
return 0;
-
-error:
- nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
- return ret;
}
+
+struct nouveau_oclass
+nve0_graph_oclass = {
+	.handle = NV_ENGINE(GR, 0xe0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nve0_graph_ctor,
+		.dtor = nvc0_graph_dtor,
+		.init = nve0_graph_init,
+		.fini = _nouveau_graph_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
new file mode 100644
index 000000000000..9c715a25cecb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -0,0 +1,269 @@
+#ifndef __NOUVEAU_GRAPH_REGS_H__
+#define __NOUVEAU_GRAPH_REGS_H__
+
+#define NV04_PGRAPH_DEBUG_0 0x00400080
+#define NV04_PGRAPH_DEBUG_1 0x00400084
+#define NV04_PGRAPH_DEBUG_2 0x00400088
+#define NV04_PGRAPH_DEBUG_3 0x0040008c
+#define NV10_PGRAPH_DEBUG_4 0x00400090
+#define NV03_PGRAPH_INTR 0x00400100
+#define NV03_PGRAPH_NSTATUS 0x00400104
+# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
+# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
+# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
+# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
+# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
+# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
+# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
+# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
+#define NV03_PGRAPH_NSOURCE 0x00400108
+# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
+# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
+# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
+# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
+# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
+# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
+# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
+# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
+# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
+# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
+# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
+# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
+# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
+# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
+# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
+# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
+# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
+#define NV03_PGRAPH_INTR_EN 0x00400140
+#define NV40_PGRAPH_INTR_EN 0x0040013C
+# define NV_PGRAPH_INTR_NOTIFY (1<<0)
+# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
+# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
+# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
+# define NV_PGRAPH_INTR_ERROR (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL 0x00400144
+#define NV10_PGRAPH_CTX_USER 0x00400148
+#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
+#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
+ + 0x4*(i) + 0x20*(j))
+#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL 0x00400170
+#define NV04_PGRAPH_CTX_USER 0x00400174
+#define NV04_PGRAPH_CTX_CACHE1 0x00400180
+#define NV03_PGRAPH_CTX_CONTROL 0x00400190
+#define NV03_PGRAPH_CTX_USER 0x00400194
+#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304 0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310 0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
+#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
+#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
+#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
+#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
+#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
+#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
+#define NV03_PGRAPH_ABS_X_RAM 0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
+#define NV03_PGRAPH_X_MISC 0x00400500
+#define NV03_PGRAPH_Y_MISC 0x00400504
+#define NV04_PGRAPH_VALID1 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
+#define NV04_PGRAPH_MISC24_0 0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
+#define NV03_PGRAPH_CLIPX_0 0x00400524
+#define NV03_PGRAPH_CLIPX_1 0x00400528
+#define NV03_PGRAPH_CLIPY_0 0x0040052C
+#define NV03_PGRAPH_CLIPY_1 0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
+#define NV04_PGRAPH_MISC24_1 0x00400570
+#define NV04_PGRAPH_MISC24_2 0x00400574
+#define NV04_PGRAPH_VALID2 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1 0x00400580
+#define NV04_PGRAPH_PASSTHRU_2 0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
+#define NV04_PGRAPH_FORMAT_0 0x004005A8
+#define NV04_PGRAPH_FORMAT_1 0x004005AC
+#define NV04_PGRAPH_FILTER_0 0x004005B0
+#define NV04_PGRAPH_FILTER_1 0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0 0x00400600
+#define NV04_PGRAPH_ROP3 0x00400604
+#define NV04_PGRAPH_BETA_AND 0x00400608
+#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV04_PGRAPH_FORMATS 0x00400618
+#define NV10_PGRAPH_DEBUG_2 0x00400620
+#define NV04_PGRAPH_BOFFSET0 0x00400640
+#define NV04_PGRAPH_BOFFSET1 0x00400644
+#define NV04_PGRAPH_BOFFSET2 0x00400648
+#define NV04_PGRAPH_BOFFSET3 0x0040064C
+#define NV04_PGRAPH_BOFFSET4 0x00400650
+#define NV04_PGRAPH_BOFFSET5 0x00400654
+#define NV04_PGRAPH_BBASE0 0x00400658
+#define NV04_PGRAPH_BBASE1 0x0040065C
+#define NV04_PGRAPH_BBASE2 0x00400660
+#define NV04_PGRAPH_BBASE3 0x00400664
+#define NV04_PGRAPH_BBASE4 0x00400668
+#define NV04_PGRAPH_BBASE5 0x0040066C
+#define NV04_PGRAPH_BPITCH0 0x00400670
+#define NV04_PGRAPH_BPITCH1 0x00400674
+#define NV04_PGRAPH_BPITCH2 0x00400678
+#define NV04_PGRAPH_BPITCH3 0x0040067C
+#define NV04_PGRAPH_BPITCH4 0x00400680
+#define NV04_PGRAPH_BLIMIT0 0x00400684
+#define NV04_PGRAPH_BLIMIT1 0x00400688
+#define NV04_PGRAPH_BLIMIT2 0x0040068C
+#define NV04_PGRAPH_BLIMIT3 0x00400690
+#define NV04_PGRAPH_BLIMIT4 0x00400694
+#define NV04_PGRAPH_BLIMIT5 0x00400698
+#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
+#define NV03_PGRAPH_STATUS 0x004006B0
+#define NV04_PGRAPH_STATUS 0x00400700
+# define NV40_PGRAPH_STATUS_SYNC_STALL 0x00004000
+#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
+#define NV04_PGRAPH_SURFACE 0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
+#define NV04_PGRAPH_STATE 0x00400710
+#define NV10_PGRAPH_SURFACE 0x00400710
+#define NV04_PGRAPH_NOTIFY 0x00400714
+#define NV10_PGRAPH_STATE 0x00400714
+#define NV10_PGRAPH_NOTIFY 0x00400718
+
+#define NV04_PGRAPH_FIFO 0x00400720
+
+#define NV04_PGRAPH_BPIXEL 0x00400724
+#define NV10_PGRAPH_RDI_INDEX 0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
+#define NV10_PGRAPH_RDI_DATA 0x00400754
+#define NV04_PGRAPH_DMA_PITCH 0x00400760
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
+#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
+#define NV10_PGRAPH_DMA_PITCH 0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
+#define NV04_PGRAPH_PATT_COLOR0 0x00400800
+#define NV04_PGRAPH_PATT_COLOR1 0x00400804
+#define NV04_PGRAPH_PATTERN 0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV04_PGRAPH_CHROMA 0x00400814
+#define NV04_PGRAPH_CONTROL0 0x00400818
+#define NV04_PGRAPH_CONTROL1 0x0040081C
+#define NV04_PGRAPH_CONTROL2 0x00400820
+#define NV04_PGRAPH_BLEND 0x00400824
+#define NV04_PGRAPH_STORED_FMT 0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
+#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
+#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM 0x00400D00
+#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM 0x00400D40
+#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
+#define NV10_PGRAPH_XFMODE0 0x00400F40
+#define NV10_PGRAPH_XFMODE1 0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
+#define NV10_PGRAPH_PIPE_DATA 0x00400F54
+#define NV04_PGRAPH_DMA_START_0 0x00401000
+#define NV04_PGRAPH_DMA_START_1 0x00401004
+#define NV04_PGRAPH_DMA_LENGTH 0x00401008
+#define NV04_PGRAPH_DMA_MISC 0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0 0x00401020
+#define NV04_PGRAPH_DMA_DATA_1 0x00401024
+#define NV04_PGRAPH_DMA_RM 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+
+#endif
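
The indexed tile macros above all follow the same 16-byte stride, so NV20_PGRAPH_TILE(1) expands to 0x00400910, NV20_PGRAPH_TLIMIT(1) to 0x00400914, and so on. A minimal sketch of how such a bank gets programmed, mirroring the addr/limit/pitch layout that nv31_mpeg_tile_prog uses for PMPEG's copy of these registers further down; wr32() and struct tile_region are placeholders, not part of this patch:

	struct tile_region { u32 addr, limit, pitch; };
	void wr32(u32 reg, u32 val);	/* placeholder MMIO write helper */

	static void
	nv20_pgraph_tile_prog(int i, const struct tile_region *t)
	{
		wr32(NV20_PGRAPH_TSIZE(i), t->pitch);
		wr32(NV20_PGRAPH_TLIMIT(i), t->limit);
		wr32(NV20_PGRAPH_TILE(i), t->addr);
	}
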
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index a0258c766850..7a1bc7641b58 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,159 +22,62 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include <engine/fifo.h>
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/handle.h>
+#include <engine/graph/nv40.h>
-struct nv31_mpeg_engine {
- struct nouveau_exec_engine base;
- atomic_t refcount;
-};
-
-
-static int
-nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
-
- if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
- return -EBUSY;
-
- chan->engctx[engine] = (void *)0xdeadcafe;
- return 0;
-}
-
-static void
-nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
- atomic_dec(&pmpeg->refcount);
- chan->engctx[engine] = NULL;
-}
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
-static int
-nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx = NULL;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
+#include <engine/mpeg.h>
- ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ctx, 0x78, 0x02001ec1);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
- nv_wr32(dev, 0x00330c, ctx->addr >> 4);
- nv_wo32(chan->ramfc, 0x54, ctx->addr >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+struct nv31_mpeg_priv {
+ struct nouveau_mpeg base;
+ atomic_t refcount;
+};
- chan->engctx[engine] = ctx;
- return 0;
-}
+struct nv31_mpeg_chan {
+ struct nouveau_object base;
+};
-static void
-nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst = 0x80000000 | (ctx->addr >> 4);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = NULL;
-}
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
static int
-nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+nv31_mpeg_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
+ struct nouveau_gpuobj *obj;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &obj);
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 20, 16, 0, &obj);
+ *pobject = nv_object(obj);
if (ret)
return ret;
- obj->engine = 2;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv31_mpeg_init(struct drm_device *dev, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
- int i;
-
- /* VPE init */
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
- nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
- nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
- for (i = 0; i < nvfb_tile_nr(dev); i++)
- pmpeg->base.set_tile_region(dev, i);
-
- /* PMPEG init */
- nv_wr32(dev, 0x00b32c, 0x00000000);
- nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
- nv_wr32(dev, 0x00b300, 0x02001ec1);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(dev, 0x00b100, 0xffffffff);
- nv_wr32(dev, 0x00b140, 0xffffffff);
-
- if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
- /*XXX: context save? */
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x00b140, 0x00000000);
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
static int
-nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+nv31_mpeg_mthd_dma(struct nouveau_object *object, u32 mthd, void *arg, u32 len)
{
- struct drm_device *dev = chan->dev;
- u32 inst = data << 4;
- u32 dma0 = nv_ri32(dev, inst + 0);
- u32 dma1 = nv_ri32(dev, inst + 4);
- u32 dma2 = nv_ri32(dev, inst + 8);
+ struct nouveau_instmem *imem = nouveau_instmem(object);
+ struct nv31_mpeg_priv *priv = (void *)object->engine;
+ u32 inst = *(u32 *)arg << 4;
+ u32 dma0 = nv_ro32(imem, inst + 0);
+ u32 dma1 = nv_ro32(imem, inst + 4);
+ u32 dma2 = nv_ro32(imem, inst + 8);
u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
u32 size = dma1 + 1;
@@ -184,160 +87,215 @@ nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
if (mthd == 0x0190) {
/* DMA_CMD */
- nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
- nv_wr32(dev, 0x00b334, base);
- nv_wr32(dev, 0x00b324, size);
+ nv_mask(priv, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+ nv_wr32(priv, 0x00b334, base);
+ nv_wr32(priv, 0x00b324, size);
} else
if (mthd == 0x01a0) {
/* DMA_DATA */
- nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
- nv_wr32(dev, 0x00b360, base);
- nv_wr32(dev, 0x00b364, size);
+ nv_mask(priv, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+ nv_wr32(priv, 0x00b360, base);
+ nv_wr32(priv, 0x00b364, size);
} else {
/* DMA_IMAGE, VRAM only */
if (dma0 & 0x000c0000)
return -EINVAL;
- nv_wr32(dev, 0x00b370, base);
- nv_wr32(dev, 0x00b374, size);
+ nv_wr32(priv, 0x00b370, base);
+ nv_wr32(priv, 0x00b374, size);
}
return 0;
}
+struct nouveau_ofuncs
+nv31_mpeg_ofuncs = {
+ .ctor = nv31_mpeg_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
+
+struct nouveau_omthds
+nv31_mpeg_omthds[] = {
+ { 0x0190, nv31_mpeg_mthd_dma },
+ { 0x01a0, nv31_mpeg_mthd_dma },
+ { 0x01b0, nv31_mpeg_mthd_dma },
+ {}
+};
+
+struct nouveau_oclass
+nv31_mpeg_sclass[] = {
+ { 0x3174, &nv31_mpeg_ofuncs, nv31_mpeg_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
static int
-nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
+nv31_mpeg_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx;
- unsigned long flags;
- int i;
-
- /* hardcode drm channel id on nv3x, so swmthd lookup works */
- if (dev_priv->card_type < NV_40)
- return 0;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
-
- ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
- if (ctx && ctx->addr == inst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
+ struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nv31_mpeg_chan *chan;
+ int ret;
+
+ if (!atomic_add_unless(&priv->refcount, 1, 1))
+ return -EBUSY;
+
+ ret = nouveau_object_create(parent, engine, oclass, 0, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ return 0;
}
static void
-nv31_vpe_set_tile_region(struct drm_device *dev, int i)
+nv31_mpeg_context_dtor(struct nouveau_object *object)
{
- struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
- nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
- nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
- nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
+ struct nv31_mpeg_priv *priv = (void *)object->engine;
+ struct nv31_mpeg_chan *chan = (void *)object;
+ atomic_dec(&priv->refcount);
+ nouveau_object_destroy(&chan->base);
}
-static void
-nv31_mpeg_isr(struct drm_device *dev)
+static struct nouveau_oclass
+nv31_mpeg_cclass = {
+ .handle = NV_ENGCTX(MPEG, 0x31),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv31_mpeg_context_ctor,
+ .dtor = nv31_mpeg_context_dtor,
+ .init = nouveau_object_init,
+ .fini = nouveau_object_fini,
+ },
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+void
+nv31_mpeg_tile_prog(struct nouveau_engine *engine, int i)
{
- u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
- u32 chid = nv31_mpeg_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x00b100);
- u32 type = nv_rd32(dev, 0x00b230);
- u32 mthd = nv_rd32(dev, 0x00b234);
- u32 data = nv_rd32(dev, 0x00b238);
+ struct nouveau_fb_tile *tile = &nouveau_fb(engine)->tile.region[i];
+ struct nv31_mpeg_priv *priv = (void *)engine;
+
+ nv_wr32(priv, 0x00b008 + (i * 0x10), tile->pitch);
+ nv_wr32(priv, 0x00b004 + (i * 0x10), tile->limit);
+ nv_wr32(priv, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+void
+nv31_mpeg_intr(struct nouveau_subdev *subdev)
+{
+ struct nv31_mpeg_priv *priv = (void *)subdev;
+ struct nouveau_engine *engine = nv_engine(subdev);
+ struct nouveau_handle *handle = NULL;
+ u32 inst = (nv_rd32(priv, 0x00b318) & 0x000fffff) << 4;
+ u32 stat = nv_rd32(priv, 0x00b100);
+ u32 type = nv_rd32(priv, 0x00b230);
+ u32 mthd = nv_rd32(priv, 0x00b234);
+ u32 data = nv_rd32(priv, 0x00b238);
u32 show = stat;
if (stat & 0x01000000) {
/* happens on initial binding of the object */
- if (type == 0x00000020 && mthd == 0x0000) {
- nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
+		if (type == 0x00000020 && mthd == 0x0000) {
+ nv_mask(priv, 0x00b308, 0x00000000, 0x00000000);
show &= ~0x01000000;
}
- if (type == 0x00000010) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
+		if (type == 0x00000010) {
+ handle = nouveau_engctx_lookup_class(engine, inst, 0x3174);
+
+ if (handle && !nv_call(handle->object, mthd, data)) {
+ nouveau_engctx_handle_put(handle);
show &= ~0x01000000;
+ }
}
}
- nv_wr32(dev, 0x00b100, stat);
- nv_wr32(dev, 0x00b230, 0x00000001);
+ nv_wr32(priv, 0x00b100, stat);
+ nv_wr32(priv, 0x00b230, 0x00000001);
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst, stat, type, mthd, data);
+ if (show) {
+ nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ inst, stat, type, mthd, data);
}
}
-static void
-nv31_vpe_isr(struct drm_device *dev)
+static int
+nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- if (nv_rd32(dev, 0x00b100))
- nv31_mpeg_isr(dev);
+ struct nv31_mpeg_priv *priv;
+ int ret;
- if (nv_rd32(dev, 0x00b800)) {
- u32 stat = nv_rd32(dev, 0x00b800);
- NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(dev, 0xb800, stat);
- }
+ ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nv31_mpeg_intr;
+ nv_engine(priv)->cclass = &nv31_mpeg_cclass;
+ nv_engine(priv)->sclass = nv31_mpeg_sclass;
+ nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+ return 0;
}
-static void
-nv31_mpeg_destroy(struct drm_device *dev, int engine)
+int
+nv31_mpeg_init(struct nouveau_object *object)
{
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
+	struct nouveau_engine *engine = nv_engine(object);
+ struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ int ret, i;
- nouveau_irq_unregister(dev, 0);
+ ret = nouveau_mpeg_init(&priv->base);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_DEL(dev, MPEG);
- kfree(pmpeg);
-}
+ /* VPE init */
+ nv_wr32(priv, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+ nv_wr32(priv, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-int
-nv31_mpeg_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv31_mpeg_engine *pmpeg;
-
- pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
- if (!pmpeg)
- return -ENOMEM;
- atomic_set(&pmpeg->refcount, 0);
-
- pmpeg->base.destroy = nv31_mpeg_destroy;
- pmpeg->base.init = nv31_mpeg_init;
- pmpeg->base.fini = nv31_mpeg_fini;
- if (dev_priv->card_type < NV_40) {
- pmpeg->base.context_new = nv31_mpeg_context_new;
- pmpeg->base.context_del = nv31_mpeg_context_del;
- } else {
- pmpeg->base.context_new = nv40_mpeg_context_new;
- pmpeg->base.context_del = nv40_mpeg_context_del;
+ for (i = 0; i < pfb->tile.regions; i++)
+ engine->tile_prog(engine, i);
+
+ /* PMPEG init */
+ nv_wr32(priv, 0x00b32c, 0x00000000);
+ nv_wr32(priv, 0x00b314, 0x00000100);
+ nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+ nv_wr32(priv, 0x00b300, 0x02001ec1);
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+ nv_wr32(priv, 0x00b100, 0xffffffff);
+ nv_wr32(priv, 0x00b140, 0xffffffff);
+
+ if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+ nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+ return -EBUSY;
}
- pmpeg->base.object_new = nv31_mpeg_object_new;
-
- /* ISR vector, PMC_ENABLE bit, and TILE regs are shared between
- * all VPE engines, for this driver's purposes the PMPEG engine
- * will be treated as the "master" and handle the global VPE
- * bits too
- */
- pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
- nouveau_irq_register(dev, 0, nv31_vpe_isr);
-
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x3174, MPEG);
- NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
-
-#if 0
- NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
- NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
- return 0;
+ return 0;
}
+
+struct nouveau_oclass
+nv31_mpeg_oclass = {
+ .handle = NV_ENGINE(MPEG, 0x31),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv31_mpeg_ctor,
+ .dtor = _nouveau_mpeg_dtor,
+ .init = nv31_mpeg_init,
+ .fini = _nouveau_mpeg_fini,
+ },
+};
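
With the nv31 conversion above, the old NVOBJ_CLASS/NVOBJ_MTHD registration calls become static data: nv31_mpeg_sclass binds class 0x3174 to nv31_mpeg_ofuncs, and nv31_mpeg_omthds binds methods 0x0190/0x01a0/0x01b0 to nv31_mpeg_mthd_dma, both lists terminated by an empty entry. A rough sketch of how a {}-terminated method table like that gets scanned; struct mthd_entry and dispatch_mthd are hypothetical names, not the nouveau core API:

	struct mthd_entry {
		u32 method;	/* e.g. 0x0190 (DMA_CMD), 0x01a0 (DMA_DATA), 0x01b0 (DMA_IMAGE) */
		int (*call)(struct nouveau_object *, u32, void *, u32);
	};

	static int
	dispatch_mthd(const struct mthd_entry *tbl, struct nouveau_object *object,
		      u32 mthd, void *data, u32 size)
	{
		for (; tbl->call; tbl++) {	/* the {} terminator ends the scan */
			if (tbl->method == mthd)
				return tbl->call(object, mthd, data, size);
		}
		return -ENODEV;	/* no handler bound to this method */
	}
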
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
new file mode 100644
index 000000000000..12418574efea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include <engine/mpeg.h>
+#include <engine/graph/nv40.h>
+
+struct nv40_mpeg_priv {
+ struct nouveau_mpeg base;
+};
+
+struct nv40_mpeg_chan {
+ struct nouveau_mpeg base;
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static int
+nv40_mpeg_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv40_mpeg_chan *chan;
+ int ret;
+
+ ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL,
+ 264 * 4, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+nv40_mpeg_context_fini(struct nouveau_object *object, bool suspend)
+{
+
+ struct nv40_mpeg_priv *priv = (void *)object->engine;
+ struct nv40_mpeg_chan *chan = (void *)object;
+ u32 inst = 0x80000000 | nv_gpuobj(chan)->addr >> 4;
+
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000000);
+ if (nv_rd32(priv, 0x00b318) == inst)
+ nv_mask(priv, 0x00b318, 0x80000000, 0x00000000);
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+ return 0;
+}
+
+static struct nouveau_oclass
+nv40_mpeg_cclass = {
+ .handle = NV_ENGCTX(MPEG, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_mpeg_context_ctor,
+ .dtor = _nouveau_mpeg_context_dtor,
+ .init = _nouveau_mpeg_context_init,
+ .fini = nv40_mpeg_context_fini,
+ .rd32 = _nouveau_mpeg_context_rd32,
+ .wr32 = _nouveau_mpeg_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static void
+nv40_mpeg_intr(struct nouveau_subdev *subdev)
+{
+ struct nv40_mpeg_priv *priv = (void *)subdev;
+ u32 stat;
+
+ if ((stat = nv_rd32(priv, 0x00b100)))
+ nv31_mpeg_intr(subdev);
+
+ if ((stat = nv_rd32(priv, 0x00b800))) {
+ nv_error(priv, "PMSRCH 0x%08x\n", stat);
+ nv_wr32(priv, 0x00b800, stat);
+ }
+}
+
+static int
+nv40_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv40_mpeg_priv *priv;
+ int ret;
+
+ ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nv40_mpeg_intr;
+ nv_engine(priv)->cclass = &nv40_mpeg_cclass;
+ nv_engine(priv)->sclass = nv31_mpeg_sclass;
+ nv_engine(priv)->tile_prog = nv31_mpeg_tile_prog;
+ return 0;
+}
+
+struct nouveau_oclass
+nv40_mpeg_oclass = {
+ .handle = NV_ENGINE(MPEG, 0x40),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv40_mpeg_ctor,
+ .dtor = _nouveau_mpeg_dtor,
+ .init = nv31_mpeg_init,
+ .fini = _nouveau_mpeg_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 4e3292ed80c1..8678a9996d57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,218 +22,219 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
-struct nv50_mpeg_engine {
- struct nouveau_exec_engine base;
-};
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
-static inline u32
-CTX_PTR(struct drm_device *dev, u32 offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+#include <engine/mpeg.h>
- if (dev_priv->chipset == 0x50)
- offset += 0x0260;
- else
- offset += 0x0060;
+struct nv50_mpeg_priv {
+ struct nouveau_mpeg base;
+};
- return offset;
-}
+struct nv50_mpeg_chan {
+ struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
static int
-nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
+nv50_mpeg_object_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx = NULL;
+ struct nouveau_gpuobj *obj;
int ret;
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
+ ret = nouveau_gpuobj_create(parent, engine, oclass, 0, parent,
+ 16, 16, 0, &obj);
+ *pobject = nv_object(obj);
if (ret)
return ret;
- nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
- nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->addr + ctx->size - 1);
- nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->addr);
- nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
- nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
- nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
-
- nv_wo32(ctx, 0x70, 0x00801ec1);
- nv_wo32(ctx, 0x7c, 0x0000037c);
- nvimem_flush(dev);
-
- chan->engctx[engine] = ctx;
+ nv_wo32(obj, 0x00, nv_mclass(obj));
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
return 0;
}
-static void
-nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- int i;
+struct nouveau_ofuncs
+nv50_mpeg_ofuncs = {
+ .ctor = nv50_mpeg_object_ctor,
+ .dtor = _nouveau_gpuobj_dtor,
+ .init = _nouveau_gpuobj_init,
+ .fini = _nouveau_gpuobj_fini,
+ .rd32 = _nouveau_gpuobj_rd32,
+ .wr32 = _nouveau_gpuobj_wr32,
+};
- for (i = 0x00; i <= 0x14; i += 4)
- nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+static struct nouveau_oclass
+nv50_mpeg_sclass[] = {
+ { 0x3174, &nv50_mpeg_ofuncs },
+ {}
+};
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = NULL;
-}
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
-static int
-nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
+int
+nv50_mpeg_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
+ struct nouveau_bar *bar = nouveau_bar(parent);
+ struct nv50_mpeg_chan *chan;
int ret;
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ ret = nouveau_mpeg_context_create(parent, engine, oclass, NULL, 128 * 4,
+ 0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
+ *pobject = nv_object(chan);
if (ret)
return ret;
- obj->engine = 2;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- nvimem_flush(dev);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-static void
-nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x08);
+ nv_wo32(chan, 0x0070, 0x00801ec1);
+ nv_wo32(chan, 0x007c, 0x0000037c);
+ bar->flush(bar);
+ return 0;
}
-static int
-nv50_mpeg_init(struct drm_device *dev, int engine)
-{
- nv_wr32(dev, 0x00b32c, 0x00000000);
- nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b0e0, 0x0000001a);
-
- nv_wr32(dev, 0x00b220, 0x00000044);
- nv_wr32(dev, 0x00b300, 0x00801ec1);
- nv_wr32(dev, 0x00b390, 0x00000000);
- nv_wr32(dev, 0x00b394, 0x00000000);
- nv_wr32(dev, 0x00b398, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(dev, 0x00b100, 0xffffffff);
- nv_wr32(dev, 0x00b140, 0xffffffff);
-
- if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
- return -EBUSY;
- }
+static struct nouveau_oclass
+nv50_mpeg_cclass = {
+ .handle = NV_ENGCTX(MPEG, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_mpeg_context_ctor,
+ .dtor = _nouveau_mpeg_context_dtor,
+ .init = _nouveau_mpeg_context_init,
+ .fini = _nouveau_mpeg_context_fini,
+ .rd32 = _nouveau_mpeg_context_rd32,
+ .wr32 = _nouveau_mpeg_context_wr32,
+ },
+};
- return 0;
-}
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
-static int
-nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+int
+nv50_mpeg_tlb_flush(struct nouveau_engine *engine)
{
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x00b140, 0x00000000);
+ nv50_vm_flush_engine(&engine->base, 0x08);
return 0;
}
-static void
-nv50_mpeg_isr(struct drm_device *dev)
+void
+nv50_mpeg_intr(struct nouveau_subdev *subdev)
{
- u32 stat = nv_rd32(dev, 0x00b100);
- u32 type = nv_rd32(dev, 0x00b230);
- u32 mthd = nv_rd32(dev, 0x00b234);
- u32 data = nv_rd32(dev, 0x00b238);
+ struct nv50_mpeg_priv *priv = (void *)subdev;
+ u32 stat = nv_rd32(priv, 0x00b100);
+ u32 type = nv_rd32(priv, 0x00b230);
+ u32 mthd = nv_rd32(priv, 0x00b234);
+ u32 data = nv_rd32(priv, 0x00b238);
u32 show = stat;
if (stat & 0x01000000) {
/* happens on initial binding of the object */
if (type == 0x00000020 && mthd == 0x0000) {
- nv_wr32(dev, 0x00b308, 0x00000100);
+ nv_wr32(priv, 0x00b308, 0x00000100);
show &= ~0x01000000;
}
}
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ if (show) {
+ nv_info(priv, "0x%08x 0x%08x 0x%08x 0x%08x\n",
stat, type, mthd, data);
}
- nv_wr32(dev, 0x00b100, stat);
- nv_wr32(dev, 0x00b230, 0x00000001);
- nv50_fb_vm_trap(dev, 1);
+ nv_wr32(priv, 0x00b100, stat);
+ nv_wr32(priv, 0x00b230, 0x00000001);
+ nv50_fb_trap(nouveau_fb(priv), 1);
}
static void
-nv50_vpe_isr(struct drm_device *dev)
+nv50_vpe_intr(struct nouveau_subdev *subdev)
{
- if (nv_rd32(dev, 0x00b100))
- nv50_mpeg_isr(dev);
+ struct nv50_mpeg_priv *priv = (void *)subdev;
- if (nv_rd32(dev, 0x00b800)) {
- u32 stat = nv_rd32(dev, 0x00b800);
- NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(dev, 0xb800, stat);
+ if (nv_rd32(priv, 0x00b100))
+ nv50_mpeg_intr(subdev);
+
+ if (nv_rd32(priv, 0x00b800)) {
+ u32 stat = nv_rd32(priv, 0x00b800);
+ nv_info(priv, "PMSRCH: 0x%08x\n", stat);
+ nv_wr32(priv, 0xb800, stat);
}
}
-static void
-nv50_mpeg_destroy(struct drm_device *dev, int engine)
+static int
+nv50_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
+ struct nv50_mpeg_priv *priv;
+ int ret;
- nouveau_irq_unregister(dev, 0);
+ ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_DEL(dev, MPEG);
- kfree(pmpeg);
+ nv_subdev(priv)->unit = 0x00400002;
+ nv_subdev(priv)->intr = nv50_vpe_intr;
+ nv_engine(priv)->cclass = &nv50_mpeg_cclass;
+ nv_engine(priv)->sclass = nv50_mpeg_sclass;
+ nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+ return 0;
}
int
-nv50_mpeg_create(struct drm_device *dev)
+nv50_mpeg_init(struct nouveau_object *object)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_mpeg_engine *pmpeg;
-
- pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
- if (!pmpeg)
- return -ENOMEM;
-
- pmpeg->base.destroy = nv50_mpeg_destroy;
- pmpeg->base.init = nv50_mpeg_init;
- pmpeg->base.fini = nv50_mpeg_fini;
- pmpeg->base.context_new = nv50_mpeg_context_new;
- pmpeg->base.context_del = nv50_mpeg_context_del;
- pmpeg->base.object_new = nv50_mpeg_object_new;
- pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
-
- if (dev_priv->chipset == 0x50) {
- nouveau_irq_register(dev, 0, nv50_vpe_isr);
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x3174, MPEG);
-#if 0
- NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
- NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
- } else {
- nouveau_irq_register(dev, 0, nv50_mpeg_isr);
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x8274, MPEG);
+ struct nv50_mpeg_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_mpeg_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x00b32c, 0x00000000);
+ nv_wr32(priv, 0x00b314, 0x00000100);
+ nv_wr32(priv, 0x00b0e0, 0x0000001a);
+
+ nv_wr32(priv, 0x00b220, 0x00000044);
+ nv_wr32(priv, 0x00b300, 0x00801ec1);
+ nv_wr32(priv, 0x00b390, 0x00000000);
+ nv_wr32(priv, 0x00b394, 0x00000000);
+ nv_wr32(priv, 0x00b398, 0x00000000);
+ nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
+
+ nv_wr32(priv, 0x00b100, 0xffffffff);
+ nv_wr32(priv, 0x00b140, 0xffffffff);
+
+ if (!nv_wait(priv, 0x00b200, 0x00000001, 0x00000000)) {
+ nv_error(priv, "timeout 0x%08x\n", nv_rd32(priv, 0x00b200));
+ return -EBUSY;
}
return 0;
-
}
+
+struct nouveau_oclass
+nv50_mpeg_oclass = {
+ .handle = NV_ENGINE(MPEG, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_mpeg_ctor,
+ .dtor = _nouveau_mpeg_dtor,
+ .init = nv50_mpeg_init,
+ .fini = _nouveau_mpeg_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
new file mode 100644
index 000000000000..8f805b44d59e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv84.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+#include <subdev/timer.h>
+
+#include <engine/mpeg.h>
+
+struct nv84_mpeg_priv {
+ struct nouveau_mpeg base;
+};
+
+struct nv84_mpeg_chan {
+ struct nouveau_mpeg_chan base;
+};
+
+/*******************************************************************************
+ * MPEG object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_sclass[] = {
+ { 0x8274, &nv50_mpeg_ofuncs },
+ {}
+};
+
+/*******************************************************************************
+ * PMPEG context
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_mpeg_cclass = {
+ .handle = NV_ENGCTX(MPEG, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_mpeg_context_ctor,
+ .dtor = _nouveau_mpeg_context_dtor,
+ .init = _nouveau_mpeg_context_init,
+ .fini = _nouveau_mpeg_context_fini,
+ .rd32 = _nouveau_mpeg_context_rd32,
+ .wr32 = _nouveau_mpeg_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PMPEG engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv84_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_mpeg_priv *priv;
+ int ret;
+
+ ret = nouveau_mpeg_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_subdev(priv)->unit = 0x00000002;
+ nv_subdev(priv)->intr = nv50_mpeg_intr;
+ nv_engine(priv)->cclass = &nv84_mpeg_cclass;
+ nv_engine(priv)->sclass = nv84_mpeg_sclass;
+ nv_engine(priv)->tlb_flush = nv50_mpeg_tlb_flush;
+ return 0;
+}
+
+struct nouveau_oclass
+nv84_mpeg_oclass = {
+ .handle = NV_ENGINE(MPEG, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_mpeg_ctor,
+ .dtor = _nouveau_mpeg_dtor,
+ .init = nv50_mpeg_init,
+ .fini = _nouveau_mpeg_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 384de6deeeea..50e7e0da1981 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,56 +22,154 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
-struct nv98_ppp_engine {
- struct nouveau_exec_engine base;
+#include <engine/ppp.h>
+
+struct nv98_ppp_priv {
+ struct nouveau_ppp base;
+};
+
+struct nv98_ppp_chan {
+ struct nouveau_ppp_chan base;
};
+/*******************************************************************************
+ * PPP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv98_ppp_sclass[] = {
+ {},
+};
+
+/*******************************************************************************
+ * PPPP context
+ ******************************************************************************/
+
static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+nv98_ppp_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00000002))
- return 0;
+ struct nv98_ppp_chan *priv;
+ int ret;
+
+ ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
return 0;
}
+static void
+nv98_ppp_context_dtor(struct nouveau_object *object)
+{
+ struct nv98_ppp_chan *priv = (void *)object;
+ nouveau_ppp_context_destroy(&priv->base);
+}
+
static int
-nv98_ppp_init(struct drm_device *dev, int engine)
+nv98_ppp_context_init(struct nouveau_object *object)
{
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ struct nv98_ppp_chan *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_ppp_context_init(&priv->base);
+ if (ret)
+ return ret;
+
return 0;
}
+static int
+nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv98_ppp_chan *priv = (void *)object;
+ return nouveau_ppp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv98_ppp_cclass = {
+ .handle = NV_ENGCTX(PPP, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_ppp_context_ctor,
+ .dtor = nv98_ppp_context_dtor,
+ .init = nv98_ppp_context_init,
+ .fini = nv98_ppp_context_fini,
+ .rd32 = _nouveau_ppp_context_rd32,
+ .wr32 = _nouveau_ppp_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PPPP engine/subdev functions
+ ******************************************************************************/
+
static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
+nv98_ppp_intr(struct nouveau_subdev *subdev)
{
- struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+}
- NVOBJ_ENGINE_DEL(dev, PPP);
+static int
+nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv98_ppp_priv *priv;
+ int ret;
+
+ ret = nouveau_ppp_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- kfree(pppp);
+ nv_subdev(priv)->unit = 0x00400002;
+ nv_subdev(priv)->intr = nv98_ppp_intr;
+ nv_engine(priv)->cclass = &nv98_ppp_cclass;
+ nv_engine(priv)->sclass = nv98_ppp_sclass;
+ return 0;
}
-int
-nv98_ppp_create(struct drm_device *dev)
+static void
+nv98_ppp_dtor(struct nouveau_object *object)
{
- struct nv98_ppp_engine *pppp;
+ struct nv98_ppp_priv *priv = (void *)object;
+ nouveau_ppp_destroy(&priv->base);
+}
- pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
- if (!pppp)
- return -ENOMEM;
+static int
+nv98_ppp_init(struct nouveau_object *object)
+{
+ struct nv98_ppp_priv *priv = (void *)object;
+ int ret;
- pppp->base.destroy = nv98_ppp_destroy;
- pppp->base.init = nv98_ppp_init;
- pppp->base.fini = nv98_ppp_fini;
+ ret = nouveau_ppp_init(&priv->base);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
return 0;
}
+
+static int
+nv98_ppp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv98_ppp_priv *priv = (void *)object;
+ return nouveau_ppp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv98_ppp_oclass = {
+ .handle = NV_ENGINE(PPP, 0x98),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv98_ppp_ctor,
+ .dtor = nv98_ppp_dtor,
+ .init = nv98_ppp_init,
+ .fini = nv98_ppp_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
new file mode 100644
index 000000000000..f48da7577cc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+#include <engine/fifo.h>
+
+struct nv04_software_priv {
+ struct nouveau_software base;
+};
+
+struct nv04_software_chan {
+ struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv04_software_set_ref(struct nouveau_object *object, u32 mthd,
+ void *data, u32 size)
+{
+ struct nouveau_object *channel = (void *)nv_engctx(object->parent);
+ struct nouveau_fifo_chan *fifo = (void *)channel->parent;
+ atomic_set(&fifo->refcnt, *(u32*)data);
+ return 0;
+}
+
+static int
+nv04_software_flip(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv04_software_chan *chan = (void *)nv_engctx(object->parent);
+ if (chan->base.flip)
+ return chan->base.flip(chan->base.flip_data);
+ return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv04_software_omthds[] = {
+ { 0x0150, nv04_software_set_ref },
+ { 0x0500, nv04_software_flip },
+ {}
+};
+
+static struct nouveau_oclass
+nv04_software_sclass[] = {
+ { 0x006e, &nouveau_object_ofuncs, nv04_software_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv04_software_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_software_chan *chan;
+ int ret;
+
+ ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv04_software_cclass = {
+ .handle = NV_ENGCTX(SW, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_software_context_ctor,
+ .dtor = _nouveau_software_context_dtor,
+ .init = _nouveau_software_context_init,
+ .fini = _nouveau_software_context_fini,
+ },
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv04_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv04_software_priv *priv;
+ int ret;
+
+ ret = nouveau_software_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nv04_software_cclass;
+ nv_engine(priv)->sclass = nv04_software_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nv04_software_oclass = {
+ .handle = NV_ENGINE(SW, 0x04),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_software_ctor,
+ .dtor = _nouveau_software_dtor,
+ .init = _nouveau_software_init,
+ .fini = _nouveau_software_fini,
+ },
+};
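
The 0x0150/0x0500 handlers above show the software engine's job: it only pokes state the DRM code owns, with the 0x0500 flip method forwarding to a callback stored on the software context. A hedged sketch of how a caller would arm that callback; my_flip_handler and hook_flip are illustrative names, not part of the patch:

	static int
	my_flip_handler(void *data)
	{
		/* finish the page flip queued for this channel; data is
		 * whatever was stored in flip_data below */
		return 0;
	}

	static void
	hook_flip(struct nouveau_software_chan *swch, void *data)
	{
		swch->flip = my_flip_handler;	/* invoked by nv04_software_flip() */
		swch->flip_data = data;
	}
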
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
new file mode 100644
index 000000000000..46dada53d272
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+
+struct nv10_software_priv {
+ struct nouveau_software base;
+};
+
+struct nv10_software_chan {
+ struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv10_software_flip(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv10_software_chan *chan = (void *)nv_engctx(object->parent);
+ if (chan->base.flip)
+ return chan->base.flip(chan->base.flip_data);
+ return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv10_software_omthds[] = {
+ { 0x0500, nv10_software_flip },
+ {}
+};
+
+static struct nouveau_oclass
+nv10_software_sclass[] = {
+ { 0x016e, &nouveau_object_ofuncs, nv10_software_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv10_software_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv10_software_chan *chan;
+ int ret;
+
+ ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static struct nouveau_oclass
+nv10_software_cclass = {
+	.handle = NV_ENGCTX(SW, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_software_context_ctor,
+ .dtor = _nouveau_software_context_dtor,
+ .init = _nouveau_software_context_init,
+ .fini = _nouveau_software_context_fini,
+ },
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv10_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv10_software_priv *priv;
+ int ret;
+
+ ret = nouveau_software_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nv10_software_cclass;
+ nv_engine(priv)->sclass = nv10_software_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nv10_software_oclass = {
+ .handle = NV_ENGINE(SW, 0x10),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv10_software_ctor,
+ .dtor = _nouveau_software_dtor,
+ .init = _nouveau_software_init,
+ .fini = _nouveau_software_fini,
+ },
+};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
new file mode 100644
index 000000000000..6b889713480d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+#include <core/namedb.h>
+#include <core/handle.h>
+#include <core/gpuobj.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nv50_software_priv {
+ struct nouveau_software base;
+};
+
+struct nv50_software_chan {
+ struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nv50_software_mthd_dma_vblsem(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nouveau_fifo_chan *fifo = (void *)nv_object(chan)->parent;
+ struct nouveau_handle *handle;
+ int ret = -EINVAL;
+
+ handle = nouveau_namedb_get(nv_namedb(fifo), *(u32 *)args);
+ if (!handle)
+ return -ENOENT;
+
+ if (nv_iclass(handle->object, NV_GPUOBJ_CLASS)) {
+ struct nouveau_gpuobj *gpuobj = nv_gpuobj(handle->object);
+ chan->base.vblank.ctxdma = gpuobj->node->offset >> 4;
+ ret = 0;
+ }
+ nouveau_namedb_put(handle);
+ return ret;
+}
+
+static int
+nv50_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ chan->base.vblank.offset = *(u32 *)args;
+ return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ chan->base.vblank.value = *(u32 *)args;
+ return 0;
+}
+
+static int
+nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nouveau_disp *disp = nouveau_disp(object);
+ unsigned long flags;
+ u32 crtc = *(u32 *)args;
+
+ if (crtc > 1)
+ return -EINVAL;
+
+ disp->vblank.get(disp->vblank.data, crtc);
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_add(&chan->base.vblank.head, &disp->vblank.list);
+ chan->base.vblank.crtc = crtc;
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ return 0;
+}
+
+static int
+nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
+ if (chan->base.flip)
+ return chan->base.flip(chan->base.flip_data);
+ return -EINVAL;
+}
+
+static struct nouveau_omthds
+nv50_software_omthds[] = {
+ { 0x018c, nv50_software_mthd_dma_vblsem },
+ { 0x0400, nv50_software_mthd_vblsem_offset },
+ { 0x0404, nv50_software_mthd_vblsem_value },
+ { 0x0408, nv50_software_mthd_vblsem_release },
+ { 0x0500, nv50_software_mthd_flip },
+ {}
+};
+
+static struct nouveau_oclass
+nv50_software_sclass[] = {
+ { 0x506e, &nouveau_object_ofuncs, nv50_software_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nv50_software_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_software_chan *chan;
+ int ret;
+
+ ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ return 0;
+}
+
+static struct nouveau_oclass
+nv50_software_cclass = {
+ .handle = NV_ENGCTX(SW, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_software_context_ctor,
+ .dtor = _nouveau_software_context_dtor,
+ .init = _nouveau_software_context_init,
+ .fini = _nouveau_software_context_fini,
+ },
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv50_software_priv *priv;
+ int ret;
+
+ ret = nouveau_software_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nv50_software_cclass;
+ nv_engine(priv)->sclass = nv50_software_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nv50_software_oclass = {
+ .handle = NV_ENGINE(SW, 0x50),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv50_software_ctor,
+ .dtor = _nouveau_software_dtor,
+ .init = _nouveau_software_init,
+ .fini = _nouveau_software_fini,
+ },
+};
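
Before moving on to the Fermi variant, the 0x506e methods handled above and the vblank-semaphore state each one feeds, summarized straight from the omthds table; the array below is purely descriptive and not part of the patch:

	static const struct { u32 mthd; const char *what; } nv50_sw_mthds[] = {
		{ 0x018c, "DMA_VBLSEM: ctxdma handle, resolved into vblank.ctxdma" },
		{ 0x0400, "VBLSEM_OFFSET: semaphore offset (vblank.offset)" },
		{ 0x0404, "VBLSEM_VALUE: value written on release (vblank.value)" },
		{ 0x0408, "VBLSEM_RELEASE: crtc (0 or 1) whose vblank fires the release" },
		{ 0x0500, "FLIP: run the channel's flip()/flip_data callback" },
	};
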
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
new file mode 100644
index 000000000000..e3be78f3a5d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+struct nvc0_software_priv {
+ struct nouveau_software base;
+};
+
+struct nvc0_software_chan {
+ struct nouveau_software_chan base;
+};
+
+/*******************************************************************************
+ * software object classes
+ ******************************************************************************/
+
+static int
+nvc0_software_mthd_vblsem_offset(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ u64 data = *(u32 *)args;
+ if (mthd == 0x0400) {
+ chan->base.vblank.offset &= 0x00ffffffffULL;
+ chan->base.vblank.offset |= data << 32;
+ } else {
+ chan->base.vblank.offset &= 0xff00000000ULL;
+ chan->base.vblank.offset |= data;
+ }
+ return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_value(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ chan->base.vblank.value = *(u32 *)args;
+ return 0;
+}
+
+static int
+nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ struct nouveau_disp *disp = nouveau_disp(object);
+ unsigned long flags;
+ u32 crtc = *(u32 *)args;
+
+ if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
+ return -EINVAL;
+
+ disp->vblank.get(disp->vblank.data, crtc);
+
+ spin_lock_irqsave(&disp->vblank.lock, flags);
+ list_add(&chan->base.vblank.head, &disp->vblank.list);
+ chan->base.vblank.crtc = crtc;
+ spin_unlock_irqrestore(&disp->vblank.lock, flags);
+ return 0;
+}
+
+static int
+nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
+ void *args, u32 size)
+{
+ struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
+ if (chan->base.flip)
+ return chan->base.flip(chan->base.flip_data);
+ return -EINVAL;
+}
+
+static struct nouveau_omthds
+nvc0_software_omthds[] = {
+ { 0x0400, nvc0_software_mthd_vblsem_offset },
+ { 0x0404, nvc0_software_mthd_vblsem_offset },
+ { 0x0408, nvc0_software_mthd_vblsem_value },
+ { 0x040c, nvc0_software_mthd_vblsem_release },
+ { 0x0500, nvc0_software_mthd_flip },
+ {}
+};
+
+static struct nouveau_oclass
+nvc0_software_sclass[] = {
+ { 0x906e, &nouveau_object_ofuncs, nvc0_software_omthds },
+ {}
+};
+
+/*******************************************************************************
+ * software context
+ ******************************************************************************/
+
+static int
+nvc0_software_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_software_chan *chan;
+ int ret;
+
+ ret = nouveau_software_context_create(parent, engine, oclass, &chan);
+ *pobject = nv_object(chan);
+ if (ret)
+ return ret;
+
+ chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
+ return 0;
+}
+
+static struct nouveau_oclass
+nvc0_software_cclass = {
+ .handle = NV_ENGCTX(SW, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_software_context_ctor,
+ .dtor = _nouveau_software_context_dtor,
+ .init = _nouveau_software_context_init,
+ .fini = _nouveau_software_context_fini,
+ },
+};
+
+/*******************************************************************************
+ * software engine/subdev functions
+ ******************************************************************************/
+
+static int
+nvc0_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nvc0_software_priv *priv;
+ int ret;
+
+ ret = nouveau_software_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
+
+ nv_engine(priv)->cclass = &nvc0_software_cclass;
+ nv_engine(priv)->sclass = nvc0_software_sclass;
+ return 0;
+}
+
+struct nouveau_oclass
+nvc0_software_oclass = {
+ .handle = NV_ENGINE(SW, 0xc0),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nvc0_software_ctor,
+ .dtor = _nouveau_software_dtor,
+ .init = _nouveau_software_init,
+ .fini = _nouveau_software_fini,
+ },
+};
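
nvc0_software_mthd_vblsem_offset() above assembles a 40-bit semaphore offset from two 32-bit methods: 0x0400 supplies the high bits and 0x0404 the low word. A minimal standalone sketch of the same masking, outside the driver:

/* Standalone sketch of assembling a 40-bit offset from separate hi/lo
 * method data, mirroring the masks used above; not driver code. */
#include <stdint.h>
#include <stdio.h>

static uint64_t vblsem_offset;

static void set_offset_hi(uint32_t data)        /* method 0x0400 */
{
	vblsem_offset &= 0x00ffffffffULL;       /* keep the low 32 bits */
	vblsem_offset |= (uint64_t)data << 32;  /* replace the high bits */
}

static void set_offset_lo(uint32_t data)        /* method 0x0404 */
{
	vblsem_offset &= 0xff00000000ULL;       /* keep the high bits */
	vblsem_offset |= data;                  /* replace the low 32 bits */
}

int main(void)
{
	set_offset_hi(0x12);
	set_offset_lo(0x34567890);
	printf("offset = 0x%010llx\n", (unsigned long long)vblsem_offset);
	return 0;
}
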
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index 5e164a684aec..dd23c80e5405 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Red Hat Inc.
+ * Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -22,61 +22,154 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include <core/ramht.h>
+#include <core/os.h>
+#include <core/class.h>
+#include <core/engctx.h>
-/*XXX: This stub is currently used on NV98+ also, as soon as this becomes
- * more than just an enable/disable stub this needs to be split out to
- * nv98_vp.c...
- */
+#include <engine/vp.h>
+
+struct nv84_vp_priv {
+ struct nouveau_vp base;
+};
-struct nv84_vp_engine {
- struct nouveau_exec_engine base;
+struct nv84_vp_chan {
+ struct nouveau_vp_chan base;
};
+/*******************************************************************************
+ * VP object classes
+ ******************************************************************************/
+
+static struct nouveau_oclass
+nv84_vp_sclass[] = {
+ {},
+};
+
+/*******************************************************************************
+ * PVP context
+ ******************************************************************************/
+
static int
-nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+nv84_vp_context_ctor(struct nouveau_object *parent,
+ struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
{
- if (!(nv_rd32(dev, 0x000200) & 0x00020000))
- return 0;
+ struct nv84_vp_chan *priv;
+ int ret;
+
+ ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
+ 0, 0, 0, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
return 0;
}
+static void
+nv84_vp_context_dtor(struct nouveau_object *object)
+{
+ struct nv84_vp_chan *priv = (void *)object;
+ nouveau_vp_context_destroy(&priv->base);
+}
+
static int
-nv84_vp_init(struct drm_device *dev, int engine)
+nv84_vp_context_init(struct nouveau_object *object)
{
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+ struct nv84_vp_chan *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_vp_context_init(&priv->base);
+ if (ret)
+ return ret;
+
return 0;
}
+static int
+nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_vp_chan *priv = (void *)object;
+ return nouveau_vp_context_fini(&priv->base, suspend);
+}
+
+static struct nouveau_oclass
+nv84_vp_cclass = {
+ .handle = NV_ENGCTX(VP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_vp_context_ctor,
+ .dtor = nv84_vp_context_dtor,
+ .init = nv84_vp_context_init,
+ .fini = nv84_vp_context_fini,
+ .rd32 = _nouveau_vp_context_rd32,
+ .wr32 = _nouveau_vp_context_wr32,
+ },
+};
+
+/*******************************************************************************
+ * PVP engine/subdev functions
+ ******************************************************************************/
+
static void
-nv84_vp_destroy(struct drm_device *dev, int engine)
+nv84_vp_intr(struct nouveau_subdev *subdev)
{
- struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+}
- NVOBJ_ENGINE_DEL(dev, VP);
+static int
+nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+ struct nouveau_oclass *oclass, void *data, u32 size,
+ struct nouveau_object **pobject)
+{
+ struct nv84_vp_priv *priv;
+ int ret;
+
+ ret = nouveau_vp_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
+ if (ret)
+ return ret;
- kfree(pvp);
+ nv_subdev(priv)->unit = 0x01020000;
+ nv_subdev(priv)->intr = nv84_vp_intr;
+ nv_engine(priv)->cclass = &nv84_vp_cclass;
+ nv_engine(priv)->sclass = nv84_vp_sclass;
+ return 0;
}
-int
-nv84_vp_create(struct drm_device *dev)
+static void
+nv84_vp_dtor(struct nouveau_object *object)
{
- struct nv84_vp_engine *pvp;
+ struct nv84_vp_priv *priv = (void *)object;
+ nouveau_vp_destroy(&priv->base);
+}
- pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
- if (!pvp)
- return -ENOMEM;
+static int
+nv84_vp_init(struct nouveau_object *object)
+{
+ struct nv84_vp_priv *priv = (void *)object;
+ int ret;
- pvp->base.destroy = nv84_vp_destroy;
- pvp->base.init = nv84_vp_init;
- pvp->base.fini = nv84_vp_fini;
+ ret = nouveau_vp_init(&priv->base);
+ if (ret)
+ return ret;
- NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
return 0;
}
+
+static int
+nv84_vp_fini(struct nouveau_object *object, bool suspend)
+{
+ struct nv84_vp_priv *priv = (void *)object;
+ return nouveau_vp_fini(&priv->base, suspend);
+}
+
+struct nouveau_oclass
+nv84_vp_oclass = {
+ .handle = NV_ENGINE(VP, 0x84),
+ .ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv84_vp_ctor,
+ .dtor = nv84_vp_dtor,
+ .init = nv84_vp_init,
+ .fini = nv84_vp_fini,
+ },
+};
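
Like every engine converted in this commit, PVP is now described by a nouveau_oclass whose ofuncs carry ctor/dtor/init/fini. A toy standalone model of that function-table pattern (all names below are invented; this is not the nouveau object core):

/* Toy model of the ctor/dtor/init/fini function table that every oclass
 * in this series provides; all names here are invented for illustration. */
#include <stdio.h>
#include <stdlib.h>

struct obj;

struct ofuncs {
	int  (*ctor)(struct obj **pobj);
	void (*dtor)(struct obj *obj);
	int  (*init)(struct obj *obj);
	int  (*fini)(struct obj *obj, int suspend);
};

struct obj {
	const struct ofuncs *funcs;
};

static int toy_ctor(struct obj **pobj)
{
	*pobj = calloc(1, sizeof(**pobj));
	return *pobj ? 0 : -1;
}

static void toy_dtor(struct obj *obj)
{
	free(obj);
}

static int toy_init(struct obj *obj)
{
	printf("init %p\n", (void *)obj);
	return 0;
}

static int toy_fini(struct obj *obj, int suspend)
{
	printf("fini %p (suspend=%d)\n", (void *)obj, suspend);
	return 0;
}

static const struct ofuncs toy_ofuncs = {
	.ctor = toy_ctor,
	.dtor = toy_dtor,
	.init = toy_init,
	.fini = toy_fini,
};

int main(void)
{
	struct obj *obj;

	if (toy_ofuncs.ctor(&obj))
		return 1;
	obj->funcs = &toy_ofuncs;                /* as the object core would */
	obj->funcs->init(obj);
	obj->funcs->fini(obj, 0);
	obj->funcs->dtor(obj);
	return 0;
}
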
diff --git a/drivers/gpu/drm/nouveau/core/include/core/ramht.h b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
index c82de98fee0e..47e4cacbca37 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/ramht.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/ramht.h
@@ -1,55 +1,23 @@
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
#ifndef __NOUVEAU_RAMHT_H__
#define __NOUVEAU_RAMHT_H__
-struct nouveau_ramht_entry {
- struct list_head head;
- struct nouveau_channel *channel;
- struct nouveau_gpuobj *gpuobj;
- u32 handle;
-};
+#include <core/gpuobj.h>
struct nouveau_ramht {
- struct drm_device *dev;
- struct kref refcount;
- spinlock_t lock;
- struct nouveau_gpuobj *gpuobj;
- struct list_head entries;
+ struct nouveau_gpuobj base;
int bits;
};
-extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
- struct nouveau_ramht **);
-extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
- struct nouveau_channel *unref_channel);
+int nouveau_ramht_insert(struct nouveau_ramht *, int chid,
+ u32 handle, u32 context);
+void nouveau_ramht_remove(struct nouveau_ramht *, int cookie);
+int nouveau_ramht_new(struct nouveau_object *, struct nouveau_object *,
+ u32 size, u32 align, struct nouveau_ramht **);
-extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
- struct nouveau_gpuobj *);
-extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
-extern struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+static inline void
+nouveau_ramht_ref(struct nouveau_ramht *obj, struct nouveau_ramht **ref)
+{
+ nouveau_gpuobj_ref(&obj->base, (struct nouveau_gpuobj **)ref);
+}
#endif
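
The reworked ramht.h drops the per-channel entry list: nouveau_ramht_insert() now takes a channel id, handle and context word and returns a value that nouveau_ramht_remove() accepts as a cookie. A simplified standalone model of that interface shape only (it makes no attempt to reproduce the real hardware hash layout):

/* Simplified model of the insert-returns-cookie / remove-by-cookie shape
 * of the new interface; the real RAMHT is a hardware hash table inside a
 * gpuobj, which this does not reproduce. */
#include <stdio.h>

#define SLOTS 8

struct entry {
	int used;
	int chid;
	unsigned handle;
	unsigned context;
};

static struct entry table[SLOTS];

static int ramht_insert(int chid, unsigned handle, unsigned context)
{
	int i;
	for (i = 0; i < SLOTS; i++) {
		if (!table[i].used) {
			table[i] = (struct entry){ 1, chid, handle, context };
			return i;               /* cookie handed back to the caller */
		}
	}
	return -1;                              /* no space left */
}

static void ramht_remove(int cookie)
{
	if (cookie >= 0 && cookie < SLOTS)
		table[cookie].used = 0;
}

int main(void)
{
	int cookie = ramht_insert(3, 0xbeef0201, 0x00100000);
	printf("inserted with cookie %d\n", cookie);
	ramht_remove(cookie);
	return 0;
}
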
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
new file mode 100644
index 000000000000..75d1ed5f85fd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_BSP_H__
+#define __NOUVEAU_BSP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_bsp_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_bsp_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_bsp_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_bsp_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_bsp_context_init _nouveau_engctx_init
+#define _nouveau_bsp_context_fini _nouveau_engctx_fini
+#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_bsp {
+ struct nouveau_engine base;
+};
+
+#define nouveau_bsp_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
+#define nouveau_bsp_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_bsp_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_bsp_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_bsp_dtor _nouveau_engine_dtor
+#define _nouveau_bsp_init _nouveau_engine_init
+#define _nouveau_bsp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_bsp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
new file mode 100644
index 000000000000..23bb9dfeaf67
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -0,0 +1,47 @@
+#ifndef __NOUVEAU_COPY_H__
+#define __NOUVEAU_COPY_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_copy_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_copy_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_copy_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_copy_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
+#define _nouveau_copy_context_init _nouveau_engctx_init
+#define _nouveau_copy_context_fini _nouveau_engctx_fini
+#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_copy {
+ struct nouveau_engine base;
+};
+
+#define nouveau_copy_create(p,e,c,y,i,d) \
+ nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
+#define nouveau_copy_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_copy_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_copy_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_copy_dtor _nouveau_engine_dtor
+#define _nouveau_copy_init _nouveau_engine_init
+#define _nouveau_copy_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nva3_copy_oclass;
+extern struct nouveau_oclass nvc0_copy0_oclass;
+extern struct nouveau_oclass nvc0_copy1_oclass;
+
+#endif
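
nouveau_copy_create() builds per-instance engine names with the "PCE"#i / "copy"#i stringification idiom. A tiny standalone example of that preprocessor trick, in case it is unfamiliar:

/* Standalone demonstration of the "PCE"#i stringification used by the
 * per-instance copy-engine create macro. */
#include <stdio.h>

#define ENGINE_NAME(i) "PCE"#i

int main(void)
{
	puts(ENGINE_NAME(0));   /* prints PCE0: #i becomes "0" and adjacent
	                         * string literals are concatenated */
	puts(ENGINE_NAME(1));   /* prints PCE1 */
	return 0;
}
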
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
new file mode 100644
index 000000000000..e3674743baaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -0,0 +1,46 @@
+#ifndef __NOUVEAU_CRYPT_H__
+#define __NOUVEAU_CRYPT_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_crypt_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_crypt_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_crypt_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_crypt_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
+#define _nouveau_crypt_context_init _nouveau_engctx_init
+#define _nouveau_crypt_context_fini _nouveau_engctx_fini
+#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_crypt {
+ struct nouveau_engine base;
+};
+
+#define nouveau_crypt_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
+#define nouveau_crypt_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_crypt_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_crypt_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_crypt_dtor _nouveau_engine_dtor
+#define _nouveau_crypt_init _nouveau_engine_init
+#define _nouveau_crypt_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_crypt_oclass;
+extern struct nouveau_oclass nv98_crypt_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
new file mode 100644
index 000000000000..38ec1252cbaa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -0,0 +1,44 @@
+#ifndef __NOUVEAU_DISP_H__
+#define __NOUVEAU_DISP_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+#include <core/device.h>
+
+struct nouveau_disp {
+ struct nouveau_engine base;
+
+ struct {
+ struct list_head list;
+ spinlock_t lock;
+ void (*notify)(void *, int);
+ void (*get)(void *, int);
+ void (*put)(void *, int);
+ void *data;
+ } vblank;
+};
+
+static inline struct nouveau_disp *
+nouveau_disp(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
+}
+
+#define nouveau_disp_create(p,e,c,i,x,d) \
+ nouveau_engine_create((p), (e), (c), true, (i), (x), (d))
+#define nouveau_disp_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_disp_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_disp_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_disp_dtor _nouveau_engine_dtor
+#define _nouveau_disp_init _nouveau_engine_init
+#define _nouveau_disp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_disp_oclass;
+extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nvd0_disp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
new file mode 100644
index 000000000000..700ccbb1941f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -0,0 +1,57 @@
+#ifndef __NOUVEAU_DMAOBJ_H__
+#define __NOUVEAU_DMAOBJ_H__
+
+#include <core/object.h>
+#include <core/engine.h>
+
+struct nouveau_gpuobj;
+
+struct nouveau_dmaobj {
+ struct nouveau_object base;
+ u32 target;
+ u32 access;
+ u64 start;
+ u64 limit;
+};
+
+#define nouveau_dmaobj_create(p,e,c,a,s,d) \
+ nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
+#define nouveau_dmaobj_destroy(p) \
+ nouveau_object_destroy(&(p)->base)
+#define nouveau_dmaobj_init(p) \
+ nouveau_object_init(&(p)->base)
+#define nouveau_dmaobj_fini(p,s) \
+ nouveau_object_fini(&(p)->base, (s))
+
+int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *data, u32 size,
+ int length, void **);
+
+#define _nouveau_dmaobj_dtor nouveau_object_destroy
+#define _nouveau_dmaobj_init nouveau_object_init
+#define _nouveau_dmaobj_fini nouveau_object_fini
+
+struct nouveau_dmaeng {
+ struct nouveau_engine base;
+ int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
+ struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+};
+
+#define nouveau_dmaeng_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "DMAOBJ", "dmaobj", (d))
+#define nouveau_dmaeng_destroy(p) \
+ nouveau_engine_destroy(&(p)->base)
+#define nouveau_dmaeng_init(p) \
+ nouveau_engine_init(&(p)->base)
+#define nouveau_dmaeng_fini(p,s) \
+ nouveau_engine_fini(&(p)->base, (s))
+
+#define _nouveau_dmaeng_dtor _nouveau_engine_dtor
+#define _nouveau_dmaeng_init _nouveau_engine_init
+#define _nouveau_dmaeng_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_dmaeng_oclass;
+extern struct nouveau_oclass nv50_dmaeng_oclass;
+extern struct nouveau_oclass nvc0_dmaeng_oclass;
+
+#endif
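
nouveau_dmaobj_create(), like the other *_create macros added in this series, forwards sizeof(**d) and (void **)d to a _create_() helper so one generic allocator can size the caller's derived structure. A standalone sketch of that pattern (names here are illustrative only):

/* Sketch of the create-macro pattern: the macro passes sizeof the caller's
 * derived type so a single helper can allocate it.  Names are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct base {
	int refcount;
};

struct derived {
	struct base base;       /* base object embedded at offset zero */
	int extra;
};

static int object_create_(size_t length, void **pobject)
{
	struct base *obj = calloc(1, length);
	if (!obj)
		return -1;
	obj->refcount = 1;
	*pobject = obj;
	return 0;
}

#define object_create(d) \
	object_create_(sizeof(**(d)), (void **)(d))

int main(void)
{
	struct derived *priv = NULL;

	if (object_create(&priv))       /* allocates sizeof(struct derived) */
		return 1;
	priv->extra = 42;
	printf("refcount %d, extra %d\n", priv->base.refcount, priv->extra);
	free(priv);
	return 0;
}
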
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index ce99cab2f257..65ee929a75f0 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -1,32 +1,109 @@
#ifndef __NOUVEAU_FIFO_H__
#define __NOUVEAU_FIFO_H__
-struct nouveau_fifo_priv {
- struct nouveau_exec_engine base;
- u32 channels;
-};
+#include <core/namedb.h>
+#include <core/gpuobj.h>
+#include <core/engine.h>
struct nouveau_fifo_chan {
+ struct nouveau_namedb base;
+ struct nouveau_dmaobj *pushdma;
+ struct nouveau_gpuobj *pushgpu;
+ void __iomem *user;
+ u32 size;
+ u16 chid;
+ atomic_t refcnt; /* NV04_NVSW_SET_REF */
+};
+
+static inline struct nouveau_fifo_chan *
+nouveau_fifo_chan(void *obj)
+{
+ return (void *)nv_namedb(obj);
+}
+
+#define nouveau_fifo_channel_create(p,e,c,b,a,s,n,m,d) \
+ nouveau_fifo_channel_create_((p), (e), (c), (b), (a), (s), (n), \
+ (m), sizeof(**d), (void **)d)
+#define nouveau_fifo_channel_init(p) \
+ nouveau_namedb_init(&(p)->base)
+#define nouveau_fifo_channel_fini(p,s) \
+ nouveau_namedb_fini(&(p)->base, (s))
+
+int nouveau_fifo_channel_create_(struct nouveau_object *,
+ struct nouveau_object *,
+ struct nouveau_oclass *,
+ int bar, u32 addr, u32 size, u32 push,
+ u32 engmask, int len, void **);
+void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
+
+#define _nouveau_fifo_channel_init _nouveau_namedb_init
+#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
+
+void _nouveau_fifo_channel_dtor(struct nouveau_object *);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+
+struct nouveau_fifo_base {
+ struct nouveau_gpuobj base;
};
-bool nv04_fifo_cache_pull(struct drm_device *, bool);
-void nv04_fifo_context_del(struct nouveau_channel *, int);
-int nv04_fifo_fini(struct drm_device *, int, bool);
-int nv04_fifo_init(struct drm_device *, int);
-void nv04_fifo_isr(struct drm_device *);
-void nv04_fifo_destroy(struct drm_device *, int);
-
-void nv50_fifo_playlist_update(struct drm_device *);
-void nv50_fifo_destroy(struct drm_device *, int);
-void nv50_fifo_tlb_flush(struct drm_device *, int);
-
-int nv04_fifo_create(struct drm_device *);
-int nv10_fifo_create(struct drm_device *);
-int nv17_fifo_create(struct drm_device *);
-int nv40_fifo_create(struct drm_device *);
-int nv50_fifo_create(struct drm_device *);
-int nv84_fifo_create(struct drm_device *);
-int nvc0_fifo_create(struct drm_device *);
-int nve0_fifo_create(struct drm_device *);
+#define nouveau_fifo_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_gpuobj_create((p), (e), (c), 0, (g), (s), (a), (f), (d))
+#define nouveau_fifo_context_destroy(p) \
+ nouveau_gpuobj_destroy(&(p)->base)
+#define nouveau_fifo_context_init(p) \
+ nouveau_gpuobj_init(&(p)->base)
+#define nouveau_fifo_context_fini(p,s) \
+ nouveau_gpuobj_fini(&(p)->base, (s))
+
+#define _nouveau_fifo_context_dtor _nouveau_gpuobj_dtor
+#define _nouveau_fifo_context_init _nouveau_gpuobj_init
+#define _nouveau_fifo_context_fini _nouveau_gpuobj_fini
+#define _nouveau_fifo_context_rd32 _nouveau_gpuobj_rd32
+#define _nouveau_fifo_context_wr32 _nouveau_gpuobj_wr32
+
+struct nouveau_fifo {
+ struct nouveau_engine base;
+
+ struct nouveau_object **channel;
+ spinlock_t lock;
+ u16 min;
+ u16 max;
+
+ void (*pause)(struct nouveau_fifo *, unsigned long *);
+ void (*start)(struct nouveau_fifo *, unsigned long *);
+};
+
+static inline struct nouveau_fifo *
+nouveau_fifo(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_FIFO];
+}
+
+#define nouveau_fifo_create(o,e,c,fc,lc,d) \
+ nouveau_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d)
+#define nouveau_fifo_init(p) \
+ nouveau_engine_init(&(p)->base)
+#define nouveau_fifo_fini(p,s) \
+ nouveau_engine_fini(&(p)->base, (s))
+
+int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, int min, int max,
+ int size, void **);
+void nouveau_fifo_destroy(struct nouveau_fifo *);
+
+#define _nouveau_fifo_init _nouveau_engine_init
+#define _nouveau_fifo_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_fifo_oclass;
+extern struct nouveau_oclass nv10_fifo_oclass;
+extern struct nouveau_oclass nv17_fifo_oclass;
+extern struct nouveau_oclass nv40_fifo_oclass;
+extern struct nouveau_oclass nv50_fifo_oclass;
+extern struct nouveau_oclass nv84_fifo_oclass;
+extern struct nouveau_oclass nvc0_fifo_oclass;
+extern struct nouveau_oclass nve0_fifo_oclass;
+
+void nv04_fifo_intr(struct nouveau_subdev *);
#endif
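
struct nouveau_fifo keeps a channel array bounded by min/max, and the common channel-create path is expected to pick a free chid from that range while holding fifo->lock. A standalone sketch of that slot allocation, with locking omitted and all names invented (the real logic lives in fifo/base.c, which is not part of this hunk):

/* Sketch of picking a free chid from a min..max channel array; locking is
 * omitted and all names are invented: the real allocation is done under
 * fifo->lock in the common fifo code. */
#include <stdio.h>
#include <stddef.h>

#define CHAN_MIN 0
#define CHAN_MAX 16

static void *channel[CHAN_MAX];

static int chid_alloc(void *chan)
{
	int chid;
	for (chid = CHAN_MIN; chid < CHAN_MAX; chid++) {
		if (!channel[chid]) {
			channel[chid] = chan;
			return chid;            /* stored as the channel's chid */
		}
	}
	return -1;                              /* every channel is in use */
}

static void chid_free(int chid)
{
	if (chid >= CHAN_MIN && chid < CHAN_MAX)
		channel[chid] = NULL;
}

int main(void)
{
	int dummy;
	int chid = chid_alloc(&dummy);

	printf("allocated chid %d\n", chid);
	chid_free(chid);
	return 0;
}
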
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/graph.h b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
new file mode 100644
index 000000000000..388cfcff7bd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/graph.h
@@ -0,0 +1,72 @@
+#ifndef __NOUVEAU_GRAPH_H__
+#define __NOUVEAU_GRAPH_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+#include <core/enum.h>
+
+struct nouveau_graph_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_graph_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_graph_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_graph_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_graph_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_graph_context_dtor _nouveau_engctx_dtor
+#define _nouveau_graph_context_init _nouveau_engctx_init
+#define _nouveau_graph_context_fini _nouveau_engctx_fini
+#define _nouveau_graph_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_graph_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_graph {
+ struct nouveau_engine base;
+};
+
+static inline struct nouveau_graph *
+nouveau_graph(void *obj)
+{
+ return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_GR];
+}
+
+#define nouveau_graph_create(p,e,c,y,d) \
+ nouveau_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
+#define nouveau_graph_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_graph_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_graph_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_graph_dtor _nouveau_engine_dtor
+#define _nouveau_graph_init _nouveau_engine_init
+#define _nouveau_graph_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_graph_oclass;
+extern struct nouveau_oclass nv10_graph_oclass;
+extern struct nouveau_oclass nv20_graph_oclass;
+extern struct nouveau_oclass nv25_graph_oclass;
+extern struct nouveau_oclass nv2a_graph_oclass;
+extern struct nouveau_oclass nv30_graph_oclass;
+extern struct nouveau_oclass nv34_graph_oclass;
+extern struct nouveau_oclass nv35_graph_oclass;
+extern struct nouveau_oclass nv40_graph_oclass;
+extern struct nouveau_oclass nv50_graph_oclass;
+extern struct nouveau_oclass nvc0_graph_oclass;
+extern struct nouveau_oclass nve0_graph_oclass;
+
+extern struct nouveau_bitfield nv04_graph_nsource[];
+extern struct nouveau_ofuncs nv04_graph_ofuncs;
+bool nv04_graph_idle(void *obj);
+
+extern struct nouveau_bitfield nv10_graph_intr_name[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
+
+extern struct nouveau_enum nv50_data_error_names[];
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
new file mode 100644
index 000000000000..bbf0d4a5bbd7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/mpeg.h
@@ -0,0 +1,61 @@
+#ifndef __NOUVEAU_MPEG_H__
+#define __NOUVEAU_MPEG_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_mpeg_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_mpeg_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_mpeg_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_mpeg_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_mpeg_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_context_dtor _nouveau_engctx_dtor
+#define _nouveau_mpeg_context_init _nouveau_engctx_init
+#define _nouveau_mpeg_context_fini _nouveau_engctx_fini
+#define _nouveau_mpeg_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_mpeg_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_mpeg {
+ struct nouveau_engine base;
+};
+
+#define nouveau_mpeg_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "PMPEG", "mpeg", (d))
+#define nouveau_mpeg_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_mpeg_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_mpeg_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_mpeg_dtor _nouveau_engine_dtor
+#define _nouveau_mpeg_init _nouveau_engine_init
+#define _nouveau_mpeg_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv31_mpeg_oclass;
+extern struct nouveau_oclass nv40_mpeg_oclass;
+extern struct nouveau_oclass nv50_mpeg_oclass;
+extern struct nouveau_oclass nv84_mpeg_oclass;
+
+extern struct nouveau_oclass nv31_mpeg_sclass[];
+void nv31_mpeg_intr(struct nouveau_subdev *);
+void nv31_mpeg_tile_prog(struct nouveau_engine *, int);
+int nv31_mpeg_init(struct nouveau_object *);
+
+extern struct nouveau_ofuncs nv50_mpeg_ofuncs;
+int nv50_mpeg_context_ctor(struct nouveau_object *, struct nouveau_object *,
+ struct nouveau_oclass *, void *, u32,
+ struct nouveau_object **);
+int nv50_mpeg_tlb_flush(struct nouveau_engine *);
+void nv50_mpeg_intr(struct nouveau_subdev *);
+int nv50_mpeg_init(struct nouveau_object *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
new file mode 100644
index 000000000000..74d554fb3281
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_PPP_H__
+#define __NOUVEAU_PPP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_ppp_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_ppp_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_ppp_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_ppp_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_ppp_context_init _nouveau_engctx_init
+#define _nouveau_ppp_context_fini _nouveau_engctx_fini
+#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_ppp {
+ struct nouveau_engine base;
+};
+
+#define nouveau_ppp_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
+#define nouveau_ppp_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_ppp_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_ppp_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_ppp_dtor _nouveau_engine_dtor
+#define _nouveau_ppp_init _nouveau_engine_init
+#define _nouveau_ppp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv98_ppp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
new file mode 100644
index 000000000000..8d740793cf8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -0,0 +1,58 @@
+#ifndef __NOUVEAU_SOFTWARE_H__
+#define __NOUVEAU_SOFTWARE_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_software_chan {
+ struct nouveau_engctx base;
+
+ struct {
+ struct list_head head;
+ u32 channel;
+ u32 ctxdma;
+ u64 offset;
+ u32 value;
+ u32 crtc;
+ } vblank;
+
+ int (*flip)(void *);
+ void *flip_data;
+};
+
+#define nouveau_software_context_create(p,e,c,d) \
+ nouveau_engctx_create((p), (e), (c), (p), 0, 0, 0, (d))
+#define nouveau_software_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_software_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_software_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_software_context_dtor _nouveau_engctx_dtor
+#define _nouveau_software_context_init _nouveau_engctx_init
+#define _nouveau_software_context_fini _nouveau_engctx_fini
+
+struct nouveau_software {
+ struct nouveau_engine base;
+};
+
+#define nouveau_software_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "SW", "software", (d))
+#define nouveau_software_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_software_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_software_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_software_dtor _nouveau_engine_dtor
+#define _nouveau_software_init _nouveau_engine_init
+#define _nouveau_software_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv04_software_oclass;
+extern struct nouveau_oclass nv10_software_oclass;
+extern struct nouveau_oclass nv50_software_oclass;
+extern struct nouveau_oclass nvc0_software_oclass;
+
+#endif
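
nouveau_software_chan carries a flip callback plus an opaque flip_data pointer, which the 0x0500 method handlers above invoke. A minimal standalone sketch of that callback/cookie pairing (the crtc consumer below is invented for illustration):

/* Minimal sketch of the flip callback + opaque flip_data pair carried by
 * the software channel; the crtc consumer here is invented. */
#include <stdio.h>

struct sw_chan {
	int (*flip)(void *data);
	void *flip_data;
};

struct fake_crtc {
	int index;
};

static int complete_flip(void *data)
{
	struct fake_crtc *crtc = data;
	printf("page flip completed on crtc %d\n", crtc->index);
	return 0;
}

/* What the 0x0500 method handler above boils down to. */
static int mthd_flip(struct sw_chan *chan)
{
	if (chan->flip)
		return chan->flip(chan->flip_data);
	return -1;
}

int main(void)
{
	struct fake_crtc crtc = { .index = 1 };
	struct sw_chan chan = { .flip = complete_flip, .flip_data = &crtc };

	return mthd_flip(&chan);
}
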
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
new file mode 100644
index 000000000000..05cd08fba377
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -0,0 +1,45 @@
+#ifndef __NOUVEAU_VP_H__
+#define __NOUVEAU_VP_H__
+
+#include <core/engine.h>
+#include <core/engctx.h>
+
+struct nouveau_vp_chan {
+ struct nouveau_engctx base;
+};
+
+#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
+ nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
+#define nouveau_vp_context_destroy(d) \
+ nouveau_engctx_destroy(&(d)->base)
+#define nouveau_vp_context_init(d) \
+ nouveau_engctx_init(&(d)->base)
+#define nouveau_vp_context_fini(d,s) \
+ nouveau_engctx_fini(&(d)->base, (s))
+
+#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
+#define _nouveau_vp_context_init _nouveau_engctx_init
+#define _nouveau_vp_context_fini _nouveau_engctx_fini
+#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
+#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
+
+struct nouveau_vp {
+ struct nouveau_engine base;
+};
+
+#define nouveau_vp_create(p,e,c,d) \
+ nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
+#define nouveau_vp_destroy(d) \
+ nouveau_engine_destroy(&(d)->base)
+#define nouveau_vp_init(d) \
+ nouveau_engine_init(&(d)->base)
+#define nouveau_vp_fini(d,s) \
+ nouveau_engine_fini(&(d)->base, (s))
+
+#define _nouveau_vp_dtor _nouveau_engine_dtor
+#define _nouveau_vp_init _nouveau_engine_init
+#define _nouveau_vp_fini _nouveau_engine_fini
+
+extern struct nouveau_oclass nv84_vp_oclass;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/device.h b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
index 5eec03a40f6d..c9e4c4afa50e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/device.h
@@ -8,7 +8,6 @@
int nouveau_device_create_(struct pci_dev *, u64 name, const char *sname,
const char *cfg, const char *dbg, int, void **);
-void nouveau_device_destroy(struct nouveau_device **);
int nv04_identify(struct nouveau_device *);
int nv10_identify(struct nouveau_device *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
index 2adfcafa4478..ec7a54e91a08 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/instmem.h
@@ -8,7 +8,6 @@
struct nouveau_instobj {
struct nouveau_object base;
struct list_head head;
- struct nouveau_mm heap;
u32 *suspend;
u64 addr;
u32 size;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index 81577bb783e8..747781c2371d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -73,6 +73,7 @@ struct nouveau_vm {
struct nouveau_vmmgr {
struct nouveau_subdev base;
+ u64 limit;
u32 pgt_bits;
u8 spg_shift;
u8 lpg_shift;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index 40456b99cb5f..d8d101630e46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -241,6 +241,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (!device->subdev[i]) {
ret = nouveau_object_ctor(nv_object(device), NULL,
oclass, NULL, i, &subdev);
+ if (ret == -ENODEV)
+ continue;
if (ret)
return ret;
@@ -404,10 +406,26 @@ nouveau_device_sclass[] = {
{}
};
+static void
+nouveau_device_dtor(struct nouveau_object *object)
+{
+ struct nouveau_device *device = (void *)object;
+
+ mutex_lock(&nv_devices_mutex);
+ list_del(&device->head);
+ mutex_unlock(&nv_devices_mutex);
+
+ if (device->base.mmio)
+ iounmap(device->base.mmio);
+
+ nouveau_subdev_destroy(&device->base);
+}
+
static struct nouveau_oclass
nouveau_device_oclass = {
.handle = NV_SUBDEV(DEVICE, 0x00),
.ofuncs = &(struct nouveau_ofuncs) {
+ .dtor = nouveau_device_dtor,
},
};
@@ -444,18 +462,3 @@ done:
mutex_unlock(&nv_devices_mutex);
return ret;
}
-
-void
-nouveau_device_destroy(struct nouveau_device **pdevice)
-{
- struct nouveau_device *device = *pdevice;
- if (device) {
- mutex_lock(&nv_devices_mutex);
- list_del(&device->head);
- mutex_unlock(&nv_devices_mutex);
- if (device->base.mmio)
- iounmap(device->base.mmio);
- nouveau_subdev_destroy(&device->base);
- }
- *pdevice = NULL;
-}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 5173c785b061..693d200a3e22 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -33,6 +33,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
int
nv04_identify(struct nouveau_device *device)
{
@@ -47,6 +53,11 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x05:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -58,6 +69,11 @@ nv04_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv04_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv04_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv04_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv04_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown RIVA chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index c4f2c2d3eaec..de6ce890e842 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -34,6 +34,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
int
nv10_identify(struct nouveau_device *device)
{
@@ -49,6 +55,9 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x15:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -61,6 +70,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x16:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -73,6 +87,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x1a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +104,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x11:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -97,6 +121,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv10_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x17:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -109,6 +138,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x1f:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -121,6 +155,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x18:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -133,6 +172,11 @@ nv10_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv10_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Celsius chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 719b72a43e47..0b30143d0114 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -34,6 +34,12 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
int
nv20_identify(struct nouveau_device *device)
{
@@ -49,6 +55,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv20_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x25:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -61,6 +72,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x28:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -73,6 +89,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv25_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x2a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +106,11 @@ nv20_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv2a_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Kelvin chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0a1a72809d82..1d5c6977c86a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -34,6 +34,13 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
int
nv30_identify(struct nouveau_device *device)
{
@@ -49,6 +56,11 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x35:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -61,6 +73,11 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x31:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -73,6 +90,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv30_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x36:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +108,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv35_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x34:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -97,6 +126,12 @@ nv30_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv17_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv34_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv31_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Rankine chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 5e1ef5e4cf7f..2e071fa9fca0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -34,6 +34,13 @@
#include <subdev/instmem.h>
#include <subdev/vm.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/disp.h>
+
int
nv40_identify(struct nouveau_device *device)
{
@@ -49,6 +56,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x41:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -61,6 +74,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x42:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -73,6 +92,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x43:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -85,6 +110,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x45:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -97,6 +128,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x47:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -109,6 +146,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x49:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -121,6 +164,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4b:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -133,6 +182,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x44:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -145,6 +200,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x46:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -157,6 +218,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4a:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -169,6 +236,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4c:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -181,6 +254,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x4e:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -193,6 +272,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x63:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -205,6 +290,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x67:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -217,6 +308,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
case 0x68:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -229,6 +326,12 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv40_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv10_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv40_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv40_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv04_disp_oclass;
break;
default:
nv_fatal(device, "unknown Curie chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 5e86a2f6ad8a..5d44b2a5bfa9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -35,6 +35,18 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/mpeg.h>
+#include <engine/vp.h>
+#include <engine/crypt.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
int
nv50_identify(struct nouveau_device *device)
{
@@ -51,6 +63,12 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv50_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv50_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x84:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -64,6 +82,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x86:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -77,6 +104,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x92:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -90,6 +126,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x94:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -103,6 +148,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x96:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -116,6 +170,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0x98:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -129,6 +192,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa0:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -142,6 +214,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xaa:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -155,6 +236,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xac:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -168,6 +258,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -181,6 +280,16 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_MPEG ] = &nv84_mpeg_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa5:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -194,6 +303,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xa8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -207,6 +325,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xaf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -220,6 +347,15 @@ nv50_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nv50_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nv50_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv50_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nv84_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nv50_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nv50_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
default:
nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 87f4e16379c6..81d6ed593428 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -36,6 +36,16 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/vp.h>
+#include <engine/bsp.h>
+#include <engine/ppp.h>
+#include <engine/copy.h>
+#include <engine/disp.h>
+
int
nvc0_identify(struct nouveau_device *device)
{
@@ -53,6 +63,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc4:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -67,6 +87,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc3:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -81,6 +111,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xce:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -95,6 +135,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xcf:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -109,6 +159,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc1:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -123,6 +183,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xc8:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -137,6 +207,16 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
break;
case 0xd9:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -151,6 +231,15 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
default:
nv_fatal(device, "unknown Fermi chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index ab8346b8bde0..f4f5a5af3c06 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -36,6 +36,12 @@
#include <subdev/vm.h>
#include <subdev/bar.h>
+#include <engine/dmaobj.h>
+#include <engine/fifo.h>
+#include <engine/software.h>
+#include <engine/graph.h>
+#include <engine/disp.h>
+
int
nve0_identify(struct nouveau_device *device)
{
@@ -53,6 +59,11 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
case 0xe7:
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
@@ -67,6 +78,11 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
break;
default:
nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index f44f0f096689..ba4d28b50368 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -139,8 +139,7 @@ nv04_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
+ ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
@@ -165,7 +164,7 @@ nv04_instmem_dtor(struct nouveau_object *object)
struct nv04_instmem_priv *priv = (void *)object;
nouveau_gpuobj_ref(NULL, &priv->ramfc);
nouveau_gpuobj_ref(NULL, &priv->ramro);
- nouveau_gpuobj_ref(NULL, &priv->ramht);
+ nouveau_ramht_ref(NULL, &priv->ramht);
nouveau_gpuobj_ref(NULL, &priv->vbios);
nouveau_mm_fini(&priv->heap);
if (priv->iomem)
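The instmem hunks above stop treating the 0x8000-byte RAMHT area as an anonymous gpuobj and allocate it through nouveau_ramht_new() instead, so the hash-table bookkeeping lives behind a dedicated type. Conceptually a RAMHT maps 32-bit object handles to per-channel context entries; the sketch below shows only that idea, assuming a simple XOR-fold hash with linear probing, and does not reproduce the real hardware table layout or hash function.

#include <stdint.h>
#include <stdio.h>

#define RAMHT_BITS 7
#define RAMHT_SIZE (1u << RAMHT_BITS)

struct ramht_entry { uint32_t handle; uint32_t context; };
struct ramht { struct ramht_entry ent[RAMHT_SIZE]; };

/* XOR-fold the handle down to RAMHT_BITS bits (illustrative hash only) */
static uint32_t ramht_hash(uint32_t handle)
{
        uint32_t hash = 0;

        while (handle) {
                hash ^= handle & (RAMHT_SIZE - 1);
                handle >>= RAMHT_BITS;
        }
        return hash;
}

/* insert with linear probing; returns the slot index, or -1 when full */
static int ramht_insert(struct ramht *ht, uint32_t handle, uint32_t context)
{
        uint32_t i, slot = ramht_hash(handle);

        for (i = 0; i < RAMHT_SIZE; i++) {
                uint32_t s = (slot + i) & (RAMHT_SIZE - 1);
                if (!ht->ent[s].handle) {
                        ht->ent[s].handle = handle;
                        ht->ent[s].context = context;
                        return (int)s;
                }
        }
        return -1;
}

int main(void)
{
        static struct ramht ht;

        printf("slot %d\n", ramht_insert(&ht, 0xbeef0201, 0x00100000));
        return 0;
}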
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
index b2f82f9e4e7f..7983d8d9b358 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.h
@@ -2,6 +2,7 @@
#define __NV04_INSTMEM_H__
#include <core/gpuobj.h>
+#include <core/ramht.h>
#include <core/mm.h>
#include <subdev/instmem.h>
@@ -14,11 +15,17 @@ struct nv04_instmem_priv {
struct nouveau_mm heap;
struct nouveau_gpuobj *vbios;
- struct nouveau_gpuobj *ramht;
+ struct nouveau_ramht *ramht;
struct nouveau_gpuobj *ramro;
struct nouveau_gpuobj *ramfc;
};
+static inline struct nv04_instmem_priv *
+nv04_instmem(void *obj)
+{
+ return (void *)nouveau_instmem(obj);
+}
+
struct nv04_instobj_priv {
struct nouveau_instobj base;
struct nouveau_mm_node *mem;
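Alongside the nouveau_ramht pointer, the header above adds an inline nv04_instmem() helper that turns a generic object pointer back into the chip-specific priv structure; the vm/nv04.h hunk further down adds the same kind of helper for the vmmgr. A standalone sketch of the idiom, assuming the base object sits first in the priv struct so the cast is well defined (all names here are illustrative, not the nouveau types):

#include <stdio.h>

/* generic "base" object, always placed first in the chip-specific priv */
struct base_subdev { const char *name; };

struct chip_priv {
        struct base_subdev base;    /* must be first for the cast to be valid */
        int chip_specific_state;
};

/* mirror of helpers like nv04_instmem()/nv04_vmmgr(): base pointer -> priv */
static inline struct chip_priv *chip_priv(void *obj)
{
        return (struct chip_priv *)obj;
}

int main(void)
{
        struct chip_priv priv = { { "instmem" }, 42 };
        struct base_subdev *generic = &priv.base;   /* what core code passes around */

        printf("%s: %d\n", generic->name, chip_priv(generic)->chip_specific_state);
        return 0;
}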
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 6a22160324c1..73c52ebd5932 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -87,8 +87,7 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
/* 0x10000-0x18000: reserve for RAMHT */
- ret = nouveau_gpuobj_new(parent, NULL, 0x08000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->ramht);
+ ret = nouveau_ramht_new(parent, NULL, 0x08000, 0, &priv->ramht);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index b92b3d47c69c..082c11b75acb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -299,6 +299,7 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
fpde = (vma->node->offset >> vmm->pgt_bits);
lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;
+
for (pde = fpde; pde <= lpde; pde++) {
struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
int big = (vma->node->type != vmm->spg_shift);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
index 6475c0201d01..bfe6766d36ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.c
@@ -96,6 +96,7 @@ nv04_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
+ priv->base.limit = NV04_PDMA_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
index 530930320bc4..e21369cd09c0 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv04.h
@@ -10,4 +10,10 @@ struct nv04_vmmgr_priv {
dma_addr_t null;
};
+static inline struct nv04_vmmgr_priv *
+nv04_vmmgr(void *obj)
+{
+ return (void *)nouveau_vmmgr(obj);
+}
+
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index f0367703dff0..bbeac8d296ed 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -97,6 +97,7 @@ nv41_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
+ priv->base.limit = NV41_GART_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
index d17f76120bcd..d099cde3a7f5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv44.c
@@ -178,6 +178,7 @@ nv44_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
priv->base.create = nv04_vm_create;
+ priv->base.limit = NV44_GART_SIZE;
priv->base.pgt_bits = 32 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 12;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 6e9bcd212cfc..0f0d3a5de5c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -154,7 +154,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
struct nouveau_engine *engine;
int i;
-#if 0
for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
if (atomic_read(&vm->engref[i])) {
engine = nouveau_engine(vm->vmm, i);
@@ -162,11 +161,6 @@ nv50_vm_flush(struct nouveau_vm *vm)
engine->tlb_flush(engine);
}
}
-#else
- nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x06); /* bar */
- nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x05); /* fifo */
- nv50_vm_flush_engine(nv_subdev(vm->vmm), 0x00); /* gr */
-#endif
}
void
@@ -206,6 +200,7 @@ nv50_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.limit = 1ULL << 40;
priv->base.pgt_bits = 29 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 16;
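The nv50_vm_flush() change above removes the previously hard-coded bar/fifo/gr flushes and instead walks every engine holding a reference on the VM, calling its tlb_flush hook when one is present. A minimal sketch of that dispatch pattern with invented types; the engref counting and hook signature below are assumptions used only for illustration.

#include <stdio.h>

#define ENGINE_NR 4

struct engine {
        const char *name;
        void (*tlb_flush)(struct engine *);
};

struct vm {
        int engref[ENGINE_NR];              /* mappings held per engine */
        struct engine *engine[ENGINE_NR];   /* NULL when the engine is absent */
};

static void gr_tlb_flush(struct engine *e)
{
        printf("flushing %s TLB\n", e->name);
}

/* flush only the engines actually using this address space */
static void vm_flush(struct vm *vm)
{
        int i;

        for (i = 0; i < ENGINE_NR; i++) {
                if (vm->engref[i] && vm->engine[i] && vm->engine[i]->tlb_flush)
                        vm->engine[i]->tlb_flush(vm->engine[i]);
        }
}

int main(void)
{
        struct engine gr = { "gr", gr_tlb_flush };
        struct vm vm = { .engref = { 0 }, .engine = { NULL } };

        vm.engine[0] = &gr;
        vm.engref[0] = 1;
        vm_flush(&vm);
        return 0;
}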
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
index a0bc0f678d12..e48ece297511 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nvc0.c
@@ -162,6 +162,7 @@ nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ priv->base.limit = 1ULL << 40;
priv->base.pgt_bits = 27 - 12;
priv->base.spg_shift = 12;
priv->base.lpg_shift = 17;
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 9b3a4617bffa..9e6ced3b941a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -21,23 +21,153 @@
*
*/
-#include "drmP.h"
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+#include <core/mm.h>
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/timer.h>
+#include <subdev/instmem.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_chan.h"
#include "nouveau_abi16.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
+
+struct nouveau_abi16 *
+nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
+{
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ mutex_lock(&cli->mutex);
+ if (!cli->abi16) {
+ struct nouveau_abi16 *abi16;
+ cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
+ if (cli->abi16) {
+ INIT_LIST_HEAD(&abi16->channels);
+ abi16->client = nv_object(cli);
+
+ /* allocate device object targeting client's default
+ * device (ie. the one that belongs to the fd it
+ * opened)
+ */
+ if (nouveau_object_new(abi16->client, NVDRM_CLIENT,
+ NVDRM_DEVICE, 0x0080,
+ &(struct nv_device_class) {
+ .device = ~0ULL,
+ },
+ sizeof(struct nv_device_class),
+ &abi16->device) == 0)
+ return cli->abi16;
+
+ kfree(cli->abi16);
+ cli->abi16 = NULL;
+ }
+
+ mutex_unlock(&cli->mutex);
+ }
+ return cli->abi16;
+}
+
+int
+nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
+{
+ struct nouveau_cli *cli = (void *)abi16->client;
+ mutex_unlock(&cli->mutex);
+ return ret;
+}
+
+u16
+nouveau_abi16_swclass(struct nouveau_drm *drm)
+{
+ switch (nv_device(drm->device)->card_type) {
+ case NV_04:
+ return 0x006e;
+ case NV_10:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ return 0x016e;
+ case NV_50:
+ return 0x506e;
+ case NV_C0:
+ case NV_D0:
+ case NV_E0:
+ return 0x906e;
+ }
+
+ return 0x0000;
+}
+
+static void
+nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
+ struct nouveau_abi16_ntfy *ntfy)
+{
+ nouveau_mm_free(&chan->heap, &ntfy->node);
+ list_del(&ntfy->head);
+ kfree(ntfy);
+}
+
+static void
+nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
+ struct nouveau_abi16_chan *chan)
+{
+ struct nouveau_abi16_ntfy *ntfy, *temp;
+
+ /* cleanup notifier state */
+ list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
+ nouveau_abi16_ntfy_fini(chan, ntfy);
+ }
+
+ if (chan->ntfy) {
+ nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
+ drm_gem_object_unreference_unlocked(chan->ntfy->gem);
+ }
+
+ if (chan->heap.block_size)
+ nouveau_mm_fini(&chan->heap);
+
+ /* destroy channel object, all children will be killed too */
+ if (chan->chan) {
+ abi16->handles &= ~(1 << (chan->chan->handle & 0xffff));
+ nouveau_channel_del(&chan->chan);
+ }
+
+ list_del(&chan->head);
+ kfree(chan);
+}
+
+void
+nouveau_abi16_fini(struct nouveau_abi16 *abi16)
+{
+ struct nouveau_cli *cli = (void *)abi16->client;
+ struct nouveau_abi16_chan *chan, *temp;
+
+ /* cleanup channels */
+ list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+ nouveau_abi16_chan_fini(abi16, chan);
+ }
+
+ /* destroy the device object */
+ nouveau_object_del(abi16->client, NVDRM_CLIENT, NVDRM_DEVICE);
+
+ kfree(cli->abi16);
+ cli->abi16 = NULL;
+}
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_timer *ptimer = nouveau_timer(device);
struct drm_nouveau_getparam *getparam = data;
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
- getparam->value = dev_priv->chipset;
+ getparam->value = device->chipset;
break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
getparam->value = dev->pci_vendor;
@@ -55,16 +185,16 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
getparam->value = 2;
break;
case NOUVEAU_GETPARAM_FB_SIZE:
- getparam->value = dev_priv->fb_available_size;
+ getparam->value = drm->gem.vram_available;
break;
case NOUVEAU_GETPARAM_AGP_SIZE:
- getparam->value = dev_priv->gart_info.aper_size;
+ getparam->value = drm->gem.gart_available;
break;
case NOUVEAU_GETPARAM_VM_VRAM_BASE:
getparam->value = 0; /* deprecated */
break;
case NOUVEAU_GETPARAM_PTIMER_TIME:
- getparam->value = nv_timer_read(dev);
+ getparam->value = ptimer->read(ptimer);
break;
case NOUVEAU_GETPARAM_HAS_BO_USAGE:
getparam->value = 1;
@@ -76,13 +206,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
/* NV40 and NV50 versions are quite different, but register
* address is the same. User is supposed to know the card
* family anyway... */
- if (dev_priv->chipset >= 0x40) {
- getparam->value = nv_rd32(dev, NV40_PMC_GRAPH_UNITS);
+ if (device->chipset >= 0x40) {
+ getparam->value = nv_rd32(device, 0x001540);
break;
}
/* FALLTHRU */
default:
- NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param);
+ nv_debug(device, "unknown parameter %lld\n", getparam->param);
return -EINVAL;
}
@@ -98,148 +228,247 @@ nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_channel_alloc *init = data;
- struct nouveau_channel *chan;
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_abi16_chan *chan;
+ struct nouveau_client *client;
+ struct nouveau_device *device;
+ struct nouveau_instmem *imem;
+ struct nouveau_fb *pfb;
int ret;
- if (!dev_priv->eng[NVOBJ_ENGINE_GR])
- return -ENODEV;
+ if (unlikely(!abi16))
+ return -ENOMEM;
+ client = nv_client(abi16->client);
if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
- return -EINVAL;
+ return nouveau_abi16_put(abi16, -EINVAL);
+
+ device = nv_device(abi16->device);
+ imem = nouveau_instmem(device);
+ pfb = nouveau_fb(device);
+
+ /* allocate "abi16 channel" data and make up a handle for it */
+ init->channel = ffsll(~abi16->handles);
+ if (!init->channel--)
+ return nouveau_abi16_put(abi16, -ENOSPC);
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOMEM);
+
+ INIT_LIST_HEAD(&chan->notifiers);
+ list_add(&chan->head, &abi16->channels);
+ abi16->handles |= (1 << init->channel);
- ret = nouveau_channel_alloc(dev, &chan, file_priv,
- init->fb_ctxdma_handle,
- init->tt_ctxdma_handle);
+ /* create channel object and initialise dma and fence management */
+ ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
+ init->channel, init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle, &chan->chan);
if (ret)
- return ret;
- init->channel = chan->id;
-
- if (nouveau_vram_pushbuf == 0) {
- if (chan->dma.ib_max)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
- NOUVEAU_GEM_DOMAIN_GART;
- else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- else
- init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- } else {
+ goto done;
+
+ if (device->card_type >= NV_50)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else
+ if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
- }
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
- if (dev_priv->card_type < NV_C0) {
+ if (device->card_type < NV_C0) {
init->subchan[0].handle = 0x00000000;
init->subchan[0].grclass = 0x0000;
init->subchan[1].handle = NvSw;
- init->subchan[1].grclass = NV_SW;
+ init->subchan[1].grclass = 0x506e;
init->nr_subchan = 2;
}
/* Named memory object area */
- ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
+ ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
+ 0, 0, &chan->ntfy);
+ if (ret == 0)
+ ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT);
+ if (ret)
+ goto done;
+
+ if (device->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(chan->ntfy, client->vm,
+ &chan->ntfy_vma);
+ if (ret)
+ goto done;
+ }
+
+ ret = drm_gem_handle_create(file_priv, chan->ntfy->gem,
&init->notifier_handle);
+ if (ret)
+ goto done;
- if (ret == 0)
- atomic_inc(&chan->users); /* userspace reference */
- nouveau_channel_put(&chan);
- return ret;
+ ret = nouveau_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
+done:
+ if (ret)
+ nouveau_abi16_chan_fini(abi16, chan);
+ return nouveau_abi16_put(abi16, ret);
}
+
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_channel_free *req = data;
- struct nouveau_channel *chan;
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_abi16_chan *chan;
+ int ret = -ENOENT;
- chan = nouveau_channel_get(file_priv, req->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
+ if (unlikely(!abi16))
+ return -ENOMEM;
- list_del(&chan->list);
- atomic_dec(&chan->users);
- nouveau_channel_put(&chan);
- return 0;
+ list_for_each_entry(chan, &abi16->channels, head) {
+ if (chan->chan->handle == (NVDRM_CHAN | req->channel)) {
+ nouveau_abi16_chan_fini(abi16, chan);
+ return nouveau_abi16_put(abi16, 0);
+ }
+ }
+
+ return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_grobj_alloc *init = data;
- struct nouveau_channel *chan;
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_object *object;
int ret;
+ if (unlikely(!abi16))
+ return -ENOMEM;
+
if (init->handle == ~0)
- return -EINVAL;
+ return nouveau_abi16_put(abi16, -EINVAL);
/* compatibility with userspace that assumes 506e for all chipsets */
if (init->class == 0x506e) {
- init->class = nouveau_software_class(dev);
+ init->class = nouveau_abi16_swclass(drm);
if (init->class == 0x906e)
- return 0;
- } else
- if (init->class == 0x906e) {
- NV_DEBUG(dev, "906e not supported yet\n");
- return -EINVAL;
- }
-
- chan = nouveau_channel_get(file_priv, init->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
-
- if (nouveau_ramht_find(chan, init->handle)) {
- ret = -EEXIST;
- goto out;
+ return nouveau_abi16_put(abi16, 0);
}
- ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
- if (ret) {
- NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
- ret, init->channel, init->handle);
- }
-
-out:
- nouveau_channel_put(&chan);
- return ret;
+ ret = nouveau_object_new(abi16->client, NVDRM_CHAN | init->channel,
+ init->handle, init->class, NULL, 0, &object);
+ return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_notifierobj_alloc *na = data;
- struct nouveau_channel *chan;
+ struct drm_nouveau_notifierobj_alloc *info = data;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_ntfy *ntfy;
+ struct nouveau_object *object;
+ struct nv_dma_class args;
int ret;
+ if (unlikely(!abi16))
+ return -ENOMEM;
+
/* completely unnecessary for these chipsets... */
- if (unlikely(dev_priv->card_type >= NV_C0))
- return -EINVAL;
+ if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
+ return nouveau_abi16_put(abi16, -EINVAL);
- chan = nouveau_channel_get(file_priv, na->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
+ list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+ if (chan->chan->handle == (NVDRM_CHAN | info->channel))
+ break;
+ chan = NULL;
+ }
- ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
- &na->offset);
- nouveau_channel_put(&chan);
- return ret;
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+
+ ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
+ if (!ntfy)
+ return nouveau_abi16_put(abi16, -ENOMEM);
+
+ list_add(&ntfy->head, &chan->notifiers);
+ ntfy->handle = info->handle;
+
+ ret = nouveau_mm_head(&chan->heap, 1, info->size, info->size, 1,
+ &ntfy->node);
+ if (ret)
+ goto done;
+
+ args.start = ntfy->node->offset;
+ args.limit = ntfy->node->offset + ntfy->node->length - 1;
+ if (device->card_type >= NV_50) {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.start += chan->ntfy_vma.offset;
+ args.limit += chan->ntfy_vma.offset;
+ } else
+ if (drm->agp.stat == ENABLED) {
+ args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.start += drm->agp.base + chan->ntfy->bo.offset;
+ args.limit += drm->agp.base + chan->ntfy->bo.offset;
+ } else {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.start += chan->ntfy->bo.offset;
+ args.limit += chan->ntfy->bo.offset;
+ }
+
+ ret = nouveau_object_new(abi16->client, chan->chan->handle,
+ ntfy->handle, 0x003d, &args,
+ sizeof(args), &object);
+ if (ret)
+ goto done;
+
+done:
+ if (ret)
+ nouveau_abi16_ntfy_fini(chan, ntfy);
+ return nouveau_abi16_put(abi16, ret);
}
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
- struct drm_nouveau_gpuobj_free *objfree = data;
- struct nouveau_channel *chan;
+ struct drm_nouveau_gpuobj_free *fini = data;
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_abi16_chan *chan, *temp;
+ struct nouveau_abi16_ntfy *ntfy;
int ret;
- chan = nouveau_channel_get(file_priv, objfree->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
+ if (unlikely(!abi16))
+ return -ENOMEM;
+
+ list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+ if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
+ break;
+ chan = NULL;
+ }
+
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
- /* Synchronize with the user channel */
- nouveau_channel_idle(chan);
+ /* synchronize with the user channel and destroy the gpu object */
+ nouveau_channel_idle(chan->chan);
- ret = nouveau_ramht_remove(chan, objfree->handle);
- nouveau_channel_put(&chan);
- return ret;
+ ret = nouveau_object_del(abi16->client, chan->chan->handle, fini->handle);
+ if (ret)
+ return nouveau_abi16_put(abi16, ret);
+
+ /* cleanup extra state if this object was a notifier */
+ list_for_each_entry(ntfy, &chan->notifiers, head) {
+ if (ntfy->handle == fini->handle) {
+ nouveau_mm_free(&chan->heap, &ntfy->node);
+ list_del(&ntfy->head);
+ break;
+ }
+ }
+
+ return nouveau_abi16_put(abi16, 0);
}
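In the channel_alloc path above, the new ABI16 layer picks a channel number by scanning the 64-bit handles bitmask with ffsll(~abi16->handles) and releases it in nouveau_abi16_chan_fini() by clearing the bit again. The sketch below shows the same allocator with an explicit loop instead of ffsll so it builds anywhere; only the idea of the handles mask comes from the patch, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

/* find the first clear bit in a 64-bit mask; returns -1 when all 64 are used */
static int alloc_id(uint64_t *mask)
{
        int bit;

        for (bit = 0; bit < 64; bit++) {
                if (!(*mask & ((uint64_t)1 << bit))) {
                        *mask |= (uint64_t)1 << bit;
                        return bit;
                }
        }
        return -1;
}

static void free_id(uint64_t *mask, int bit)
{
        *mask &= ~((uint64_t)1 << bit);
}

int main(void)
{
        uint64_t handles = 0;
        int a = alloc_id(&handles);   /* 0 */
        int b = alloc_id(&handles);   /* 1 */

        free_id(&handles, a);
        printf("a=%d b=%d next=%d\n", a, b, alloc_id(&handles)); /* next reuses 0 */
        return 0;
}

Allocation is a short scan over 64 slots and a free is a single bit clear, which is all the ioctl path needs.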
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.h b/drivers/gpu/drm/nouveau/nouveau_abi16.h
index e6328b008a8c..90004081a501 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.h
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.h
@@ -3,6 +3,7 @@
#define ABI16_IOCTL_ARGS \
struct drm_device *dev, void *data, struct drm_file *file_priv
+
int nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS);
@@ -11,6 +12,37 @@ int nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS);
int nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS);
+struct nouveau_abi16_ntfy {
+ struct list_head head;
+ struct nouveau_mm_node *node;
+ u32 handle;
+};
+
+struct nouveau_abi16_chan {
+ struct list_head head;
+ struct nouveau_channel *chan;
+ struct list_head notifiers;
+ struct nouveau_bo *ntfy;
+ struct nouveau_vma ntfy_vma;
+ struct nouveau_mm heap;
+};
+
+struct nouveau_abi16 {
+ struct nouveau_object *client;
+ struct nouveau_object *device;
+ struct list_head channels;
+ u64 handles;
+};
+
+struct nouveau_drm;
+struct nouveau_abi16 *nouveau_abi16_get(struct drm_file *, struct drm_device *);
+int nouveau_abi16_put(struct nouveau_abi16 *, int);
+void nouveau_abi16_fini(struct nouveau_abi16 *);
+u16 nouveau_abi16_swclass(struct nouveau_drm *);
+
+#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
+#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
+
struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
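Each nouveau_abi16_chan above also carries a small per-channel heap from which notifier objects are sub-allocated (nouveau_mm_head()/nouveau_mm_free() in the .c hunks, backed by the page-sized ntfy buffer). A toy block allocator over one 4 KiB page gives the flavour of that heap; it is an illustration only, not the nouveau_mm implementation.

#include <stdio.h>

#define HEAP_SIZE   4096u
#define BLOCK_SIZE  256u
#define NUM_BLOCKS  (HEAP_SIZE / BLOCK_SIZE)

static unsigned char used[NUM_BLOCKS];

/* allocate 'size' bytes as a run of free 256-byte blocks; returns offset or -1 */
static long heap_alloc(unsigned size)
{
        unsigned need = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
        unsigned i, j;

        for (i = 0; i + need <= NUM_BLOCKS; i++) {
                for (j = 0; j < need && !used[i + j]; j++)
                        ;
                if (j == need) {
                        for (j = 0; j < need; j++)
                                used[i + j] = 1;
                        return (long)(i * BLOCK_SIZE);
                }
        }
        return -1;
}

static void heap_free(long offset, unsigned size)
{
        unsigned need = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
        unsigned i, first = (unsigned)offset / BLOCK_SIZE;

        for (i = 0; i < need; i++)
                used[first + i] = 0;
}

int main(void)
{
        long a = heap_alloc(512);   /* two blocks at offset 0 */
        long b = heap_alloc(100);   /* one block at offset 512 */

        heap_free(a, 512);
        printf("a=%ld b=%ld reuse=%ld\n", a, b, heap_alloc(256)); /* reuse=0 */
        return 0;
}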
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 778cd149f7cd..83686ef75d04 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -13,7 +13,6 @@
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include <nouveau_drm.h>
-#include "nv50_display.h"
#include "nouveau_connector.h"
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c3e66ae04c83..3465df327227 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,66 +27,57 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
-#include "drmP.h"
-#include "ttm/ttm_page_alloc.h"
+#include <core/engine.h>
-#include <nouveau_drm.h>
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/mm.h>
#include "nouveau_fence.h"
-#include <core/ramht.h>
-#include <engine/fifo.h>
-#include <linux/log2.h>
-#include <linux/slab.h>
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
/*
* NV10-NV40 tiling helpers
*/
static void
-nv10_bo_update_tile_region(struct drm_device *dev,
- struct nouveau_tile_reg *tilereg, uint32_t addr,
- uint32_t size, uint32_t pitch, uint32_t flags)
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+ u32 addr, u32 size, u32 pitch, u32 flags)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i = tilereg - dev_priv->tile.reg, j;
- struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
- unsigned long save;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ int i = reg - drm->tile.reg;
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+ struct nouveau_engine *engine;
- nouveau_fence_unref(&tilereg->fence);
+ nouveau_fence_unref(&reg->fence);
if (tile->pitch)
- nvfb_tile_fini(dev, i);
+ pfb->tile.fini(pfb, i, tile);
if (pitch)
- nvfb_tile_init(dev, i, addr, size, pitch, flags);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, save);
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- nv04_fifo_cache_pull(dev, false);
+ pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
- nouveau_wait_for_idle(dev);
-
- nvfb_tile_prog(dev, i);
- for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
- if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
- dev_priv->eng[j]->set_tile_region(dev, i);
- }
+ pfb->tile.prog(pfb, i, tile);
- nv04_fifo_cache_pull(dev, true);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+ if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+ engine->tile_prog(engine, i);
+ if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+ engine->tile_prog(engine, i);
}
-static struct nouveau_tile_reg *
+static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_drm_tile *tile = &drm->tile.reg[i];
- spin_lock(&dev_priv->tile.lock);
+ spin_lock(&drm->tile.lock);
if (!tile->used &&
(!tile->fence || nouveau_fence_done(tile->fence)))
@@ -94,18 +85,18 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
else
tile = NULL;
- spin_unlock(&dev_priv->tile.lock);
+ spin_unlock(&drm->tile.lock);
return tile;
}
static void
-nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
- struct nouveau_fence *fence)
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
+ struct nouveau_fence *fence)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
if (tile) {
- spin_lock(&dev_priv->tile.lock);
+ spin_lock(&drm->tile.lock);
if (fence) {
/* Mark it as pending. */
tile->fence = fence;
@@ -113,25 +104,27 @@ nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
}
tile->used = false;
- spin_unlock(&dev_priv->tile.lock);
+ spin_unlock(&drm->tile.lock);
}
}
-static struct nouveau_tile_reg *
-nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
- uint32_t pitch, uint32_t flags)
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+ u32 size, u32 pitch, u32 flags)
{
- struct nouveau_tile_reg *tile, *found = NULL;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_drm_tile *tile, *found = NULL;
int i;
- for (i = 0; i < nvfb_tile_nr(dev); i++) {
+ for (i = 0; i < pfb->tile.regions; i++) {
tile = nv10_bo_get_tile_region(dev, i);
if (pitch && !found) {
found = tile;
continue;
- } else if (tile && nvfb_tile(dev, i)->pitch) {
+ } else if (tile && pfb->tile.region[i].pitch) {
/* Kill an unused tile region. */
nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
}
@@ -148,13 +141,12 @@ nv10_bo_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
-
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}
@@ -163,23 +155,24 @@ static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nv_device(drm->device);
- if (dev_priv->card_type < NV_50) {
+ if (device->card_type < NV_50) {
if (nvbo->tile_mode) {
- if (dev_priv->chipset >= 0x40) {
+ if (device->chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x30) {
+ } else if (device->chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x20) {
+ } else if (device->chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x10) {
+ } else if (device->chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
@@ -198,7 +191,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
@@ -215,22 +208,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
- nvbo->bo.bdev = &dev_priv->ttm.bdev;
+ nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
- if (dev_priv->chan_vm) {
+ if (drm->client.base.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = nvvm_lpg_shift(dev_priv->chan_vm);
+ nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+ acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
sizeof(struct nouveau_bo));
- ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+ ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
@@ -259,10 +252,11 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- int vram_pages = nvfb_vram_size(dev_priv->dev) >> PAGE_SHIFT;
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;
- if (dev_priv->card_type == NV_10 &&
+ if (nv_device(drm->device)->card_type == NV_10 &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -302,13 +296,12 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
- NV_ERROR(nouveau_bdev(bo->bdev)->dev,
- "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+ NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
return -EINVAL;
}
@@ -326,10 +319,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- dev_priv->fb_aper_free -= bo->mem.size;
+ drm->gem.vram_available -= bo->mem.size;
break;
case TTM_PL_TT:
- dev_priv->gart_info.aper_free -= bo->mem.size;
+ drm->gem.gart_available -= bo->mem.size;
break;
default:
break;
@@ -345,7 +338,7 @@ out:
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
@@ -362,10 +355,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- dev_priv->fb_aper_free += bo->mem.size;
+ drm->gem.vram_available += bo->mem.size;
break;
case TTM_PL_TT:
- dev_priv->gart_info.aper_free += bo->mem.size;
+ drm->gem.gart_available += bo->mem.size;
break;
default:
break;
@@ -460,30 +453,18 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
}
static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct drm_device *dev = drm->dev;
- switch (dev_priv->gart_info.type) {
-#if __OS_HAS_AGP
- case NOUVEAU_GART_AGP:
- return ttm_agp_tt_create(bdev, dev->agp->bridge,
- size, page_flags, dummy_read_page);
-#endif
- case NOUVEAU_GART_PDMA:
- case NOUVEAU_GART_HW:
- return nouveau_sgdma_create_ttm(bdev, size, page_flags,
- dummy_read_page);
- default:
- NV_ERROR(dev, "Unknown GART type %d\n",
- dev_priv->gart_info.type);
- break;
+ if (drm->agp.stat == ENABLED) {
+ return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+ page_flags, dummy_read);
}
- return NULL;
+ return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
@@ -497,8 +478,7 @@ static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -507,7 +487,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- if (dev_priv->card_type >= NV_50) {
+ if (nv_device(drm->device)->card_type >= NV_50) {
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
@@ -521,35 +501,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- if (dev_priv->card_type >= NV_50)
+ if (nv_device(drm->device)->card_type >= NV_50)
man->func = &nouveau_gart_manager;
else
- if (dev_priv->gart_info.type != NOUVEAU_GART_AGP)
+ if (drm->agp.stat != ENABLED)
man->func = &nv04_gart_manager;
else
man->func = &ttm_bo_manager_func;
- switch (dev_priv->gart_info.type) {
- case NOUVEAU_GART_AGP:
+
+ if (drm->agp.stat == ENABLED) {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- break;
- case NOUVEAU_GART_PDMA:
- case NOUVEAU_GART_HW:
+ } else {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- NV_ERROR(dev, "Unknown GART type: %d\n",
- dev_priv->gart_info.type);
- return -EINVAL;
}
+
break;
default:
- NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
@@ -783,20 +756,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
+ int ret = RING_SPACE(chan, 6);
if (ret == 0) {
- ret = RING_SPACE(chan, 6);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, NvDmaFB);
- OUT_RING (chan, NvDmaFB);
- } else {
- nouveau_ramht_remove(chan, NvNotify0);
- }
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, NvDmaFB);
}
return ret;
@@ -895,16 +862,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
+ int ret = RING_SPACE(chan, 4);
if (ret == 0) {
- ret = RING_SPACE(chan, 4);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, NvNotify0);
- }
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+ OUT_RING (chan, NvNotify0);
}
return ret;
@@ -915,8 +878,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
+ return NvDmaTT;
+ return NvDmaFB;
}
static int
@@ -972,8 +935,9 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
struct nouveau_mem *node = mem->mm_node;
int ret;
- ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
- node->page_shift, NV_MEM_ACCESS_RO, vma);
+ ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
+ PAGE_SHIFT, node->page_shift,
+ NV_MEM_ACCESS_RW, vma);
if (ret)
return ret;
@@ -990,19 +954,19 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_channel *chan = chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_channel *chan = chan = drm->channel;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
int ret;
- mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+ mutex_lock(&chan->cli->mutex);
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (dev_priv->card_type >= NV_50) {
+ if (nv_device(drm->device)->card_type >= NV_50) {
struct nouveau_mem *node = old_mem->mm_node;
ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
@@ -1014,7 +978,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
goto out;
}
- ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
+ ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
if (ret == 0) {
ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
no_wait_reserve,
@@ -1022,14 +986,15 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
out:
- mutex_unlock(&chan->mutex);
+ mutex_unlock(&chan->cli->mutex);
return ret;
}
void
nouveau_bo_move_init(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_cli *cli = chan->cli;
+ struct nouveau_drm *drm = chan->drm;
static const struct {
const char *name;
int engine;
@@ -1054,19 +1019,26 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
int ret;
do {
+ struct nouveau_object *object;
u32 handle = (mthd->engine << 16) | mthd->oclass;
- ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+
+ ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
+ mthd->oclass, NULL, 0, &object);
if (ret == 0) {
ret = mthd->init(chan, handle);
- if (ret == 0) {
- dev_priv->ttm.move = mthd->exec;
- name = mthd->name;
- break;
+ if (ret) {
+ nouveau_object_del(nv_object(cli),
+ chan->handle, handle);
+ continue;
}
+
+ drm->ttm.move = mthd->exec;
+ name = mthd->name;
+ break;
}
} while ((++mthd)->exec);
- NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+ NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static int
@@ -1151,7 +1123,7 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
nouveau_vm_map(vma, new_mem->mm_node);
} else
if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == nvvm_spg_shift(vma->vm)) {
+ nvbo->page_shift == vma->vm->vmm->spg_shift) {
if (((struct nouveau_mem *)new_mem->mm_node)->sg)
nouveau_vm_map_sg_table(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
@@ -1168,10 +1140,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
- struct nouveau_tile_reg **new_tile)
+ struct nouveau_drm_tile **new_tile)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 offset = new_mem->start << PAGE_SHIFT;
@@ -1179,7 +1151,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- if (dev_priv->card_type >= NV_10) {
+ if (nv_device(drm->device)->card_type >= NV_10) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
@@ -1190,11 +1162,11 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
- struct nouveau_tile_reg *new_tile,
- struct nouveau_tile_reg **old_tile)
+ struct nouveau_drm_tile *new_tile,
+ struct nouveau_drm_tile **old_tile)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
@@ -1205,13 +1177,13 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
- struct nouveau_tile_reg *new_tile = NULL;
+ struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if (dev_priv->card_type < NV_50) {
+ if (nv_device(drm->device)->card_type < NV_50) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@@ -1226,7 +1198,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
}
/* CPU copy if we have no accelerated method available */
- if (!dev_priv->ttm.move) {
+ if (!drm->ttm.move) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -1246,7 +1218,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
- if (dev_priv->card_type < NV_50) {
+ if (nv_device(drm->device)->card_type < NV_50) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1266,8 +1238,8 @@ static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct drm_device *dev = drm->dev;
int ret;
mem->bus.addr = NULL;
@@ -1283,9 +1255,9 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = dev_priv->gart_info.aper_base;
+ mem->bus.base = drm->agp.base;
mem->bus.is_iomem = true;
}
#endif
@@ -1294,10 +1266,11 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
- if (dev_priv->card_type >= NV_50) {
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_mem *node = mem->mm_node;
- ret = nvbar_map(dev, node, NV_MEM_ACCESS_RW,
+ ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
&node->bar_vma);
if (ret)
return ret;
@@ -1314,40 +1287,40 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_mem *node = mem->mm_node;
- if (mem->mem_type != TTM_PL_VRAM)
- return;
-
if (!node->bar_vma.node)
return;
- nvbar_unmap(dev_priv->dev, &node->bar_vma);
+ bar->unmap(bar, &node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_device *device = nv_device(drm->device);
+ u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (dev_priv->card_type < NV_50 ||
+ if (nv_device(drm->device)->card_type < NV_50 ||
!nouveau_bo_tile_layout(nvbo))
return 0;
}
/* make sure bo is in mappable vram */
- if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+ if (bo->mem.start + bo->mem.num_pages < mappable)
return 0;
nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+ nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
return nouveau_bo_validate(nvbo, false, true, false);
}
@@ -1356,7 +1329,7 @@ static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
+ struct nouveau_drm *drm;
struct drm_device *dev;
unsigned i;
int r;
@@ -1373,11 +1346,11 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
+ drm = nouveau_bdev(ttm->bdev);
+ dev = drm->dev;
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_populate(ttm);
}
#endif
@@ -1414,7 +1387,7 @@ static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
+ struct nouveau_drm *drm;
struct drm_device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1422,11 +1395,11 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
+ drm = nouveau_bdev(ttm->bdev);
+ dev = drm->dev;
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
ttm_agp_tt_unpopulate(ttm);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index a0a889cbf5ca..c42aea9fb546 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -2,13 +2,9 @@
#define __NOUVEAU_BO_H__
struct nouveau_channel;
+struct nouveau_fence;
struct nouveau_vma;
-struct nouveau_tile_reg {
- bool used;
- struct nouveau_fence *fence;
-};
-
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@@ -29,7 +25,7 @@ struct nouveau_bo {
u32 tile_mode;
u32 tile_flags;
- struct nouveau_tile_reg *tile;
+ struct nouveau_drm_tile *tile;
struct drm_gem_object *gem;
int pin_refcnt;
@@ -89,4 +85,15 @@ int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
struct nouveau_vma *);
void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
+/* TODO: submit equivalent to TTM generic API upstream? */
+static inline void __iomem *
+nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
+{
+ bool is_iomem;
+ void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
+ &nvbo->kmap, &is_iomem);
+ WARN_ON_ONCE(ioptr && !is_iomem);
+ return ioptr;
+}
+
#endif
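
The inline helper added above hands the TTM kmap back as an __iomem pointer (warning once if the mapping turns out not to be iomem), so callers can use the io accessors directly. A minimal caller-side sketch, assuming the buffer has already been mapped with nouveau_bo_map(); the function name and the value written are purely illustrative and not part of the patch:

/* illustrative sketch only */
static void example_scratch_write(struct nouveau_bo *nvbo)
{
	/* assumes nouveau_bo_map() has already populated nvbo->kmap */
	void __iomem *ioptr = nvbo_kmap_obj_iovirtual(nvbo);

	if (ioptr)
		iowrite32(0xcafe0001, ioptr);	/* first dword of the mapping */
}
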
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
new file mode 100644
index 000000000000..3dd5f712b98c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <core/object.h>
+#include <core/client.h>
+#include <core/device.h>
+#include <core/class.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
+
+#include <engine/software.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
+#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
+
+MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
+static int nouveau_vram_pushbuf;
+module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
+
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct nouveau_drm *drm = chan->drm;
+ struct nouveau_fence *fence = NULL;
+ int ret;
+
+ ret = nouveau_fence_new(chan, &fence);
+ if (!ret) {
+ ret = nouveau_fence_wait(fence, false, false);
+ nouveau_fence_unref(&fence);
+ }
+
+ if (ret)
+ NV_ERROR(drm, "failed to idle channel 0x%08x\n", chan->handle);
+ return ret;
+}
+
+void
+nouveau_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan = *pchan;
+ if (chan) {
+ struct nouveau_object *client = nv_object(chan->cli);
+ if (chan->fence) {
+ nouveau_channel_idle(chan);
+ nouveau_fence(chan->drm)->context_del(chan);
+ }
+ nouveau_object_del(client, NVDRM_DEVICE, chan->handle);
+ nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
+ nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
+ nouveau_bo_unmap(chan->push.buffer);
+ nouveau_bo_ref(NULL, &chan->push.buffer);
+ kfree(chan);
+ }
+ *pchan = NULL;
+}
+
+static int
+nouveau_channel_prep(struct nouveau_drm *drm, struct nouveau_cli *cli,
+ u32 parent, u32 handle, u32 size,
+ struct nouveau_channel **pchan)
+{
+ struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_instmem *imem = nouveau_instmem(device);
+ struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+ struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nouveau_client *client = &cli->base;
+ struct nv_dma_class args = {};
+ struct nouveau_channel *chan;
+ struct nouveau_object *push;
+ u32 target;
+ int ret;
+
+ chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ chan->cli = cli;
+ chan->drm = drm;
+ chan->handle = handle;
+
+ /* allocate memory for dma push buffer */
+ target = TTM_PL_FLAG_TT;
+ if (nouveau_vram_pushbuf)
+ target = TTM_PL_FLAG_VRAM;
+
+ ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
+ &chan->push.buffer);
+ if (ret == 0) {
+ ret = nouveau_bo_pin(chan->push.buffer, target);
+ if (ret == 0)
+ ret = nouveau_bo_map(chan->push.buffer);
+ }
+
+ if (ret) {
+ nouveau_channel_del(pchan);
+ return ret;
+ }
+
+ /* create dma object covering the *entire* memory space that the
+ * pushbuf lives in, this is because the GEM code requires that
+ * we be able to call out to other (indirect) push buffers
+ */
+ chan->push.vma.offset = chan->push.buffer->bo.offset;
+ chan->push.handle = NVDRM_PUSH | (handle & 0xffff);
+
+ if (device->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(chan->push.buffer, client->vm,
+ &chan->push.vma);
+ if (ret) {
+ nouveau_channel_del(pchan);
+ return ret;
+ }
+
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.start = 0;
+ args.limit = client->vm->vmm->limit - 1;
+ } else
+ if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+ u64 limit = pfb->ram.size - imem->reserved - 1;
+ if (device->card_type == NV_04) {
+ /* nv04 vram pushbuf hack, retarget to its location in
+ * the framebuffer bar rather than direct vram access..
+ * nfi why this exists, it came from the -nv ddx.
+ */
+ args.flags = NV_DMA_TARGET_PCI | NV_DMA_ACCESS_RDWR;
+ args.start = pci_resource_start(device->pdev, 1);
+ args.limit = args.start + limit;
+ } else {
+ args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.start = 0;
+ args.limit = limit;
+ }
+ } else {
+ if (chan->drm->agp.stat == ENABLED) {
+ args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.start = chan->drm->agp.base;
+ args.limit = chan->drm->agp.base +
+ chan->drm->agp.size - 1;
+ } else {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.start = 0;
+ args.limit = vmm->limit - 1;
+ }
+ }
+
+ ret = nouveau_object_new(nv_object(chan->cli), parent,
+ chan->push.handle, 0x0002,
+ &args, sizeof(args), &push);
+ if (ret) {
+ nouveau_channel_del(pchan);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_channel_ind(struct nouveau_drm *drm, struct nouveau_cli *cli,
+ u32 parent, u32 handle, struct nouveau_channel **pchan)
+{
+ static const u16 oclasses[] = { 0xa06f, 0x906f, 0x826f, 0x506f, 0 };
+ const u16 *oclass = oclasses;
+ struct nv_channel_ind_class args;
+ struct nouveau_channel *chan;
+ int ret;
+
+ /* allocate dma push buffer */
+ ret = nouveau_channel_prep(drm, cli, parent, handle, 0x12000, &chan);
+ *pchan = chan;
+ if (ret)
+ return ret;
+
+ /* create channel object */
+ args.pushbuf = chan->push.handle;
+ args.ioffset = 0x10000 + chan->push.vma.offset;
+ args.ilength = 0x02000;
+
+ do {
+ ret = nouveau_object_new(nv_object(cli), parent, handle,
+ *oclass++, &args, sizeof(args),
+ &chan->object);
+ if (ret == 0)
+ return ret;
+ } while (*oclass);
+
+ nouveau_channel_del(pchan);
+ return ret;
+}
+
+static int
+nouveau_channel_dma(struct nouveau_drm *drm, struct nouveau_cli *cli,
+ u32 parent, u32 handle, struct nouveau_channel **pchan)
+{
+ static const u16 oclasses[] = { 0x006e, 0 };
+ const u16 *oclass = oclasses;
+ struct nv_channel_dma_class args;
+ struct nouveau_channel *chan;
+ int ret;
+
+ /* allocate dma push buffer */
+ ret = nouveau_channel_prep(drm, cli, parent, handle, 0x10000, &chan);
+ *pchan = chan;
+ if (ret)
+ return ret;
+
+ /* create channel object */
+ args.pushbuf = chan->push.handle;
+ args.offset = chan->push.vma.offset;
+
+ do {
+ ret = nouveau_object_new(nv_object(cli), parent, handle,
+ *oclass++, &args, sizeof(args),
+ &chan->object);
+ if (ret == 0)
+ return ret;
+ } while (ret && *oclass);
+
+ nouveau_channel_del(pchan);
+ return ret;
+}
+
+static int
+nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
+{
+ struct nouveau_client *client = nv_client(chan->cli);
+ struct nouveau_device *device = nv_device(chan->drm->device);
+ struct nouveau_instmem *imem = nouveau_instmem(device);
+ struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
+ struct nouveau_fb *pfb = nouveau_fb(device);
+ struct nouveau_software_chan *swch;
+ struct nouveau_object *object;
+ struct nv_dma_class args;
+ int ret, i;
+
+ chan->vram = vram;
+ chan->gart = gart;
+
+ /* allocate dma objects to cover all allowed vram, and gart */
+ if (device->card_type < NV_C0) {
+ if (device->card_type >= NV_50) {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.start = 0;
+ args.limit = client->vm->vmm->limit - 1;
+ } else {
+ args.flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR;
+ args.start = 0;
+ args.limit = pfb->ram.size - imem->reserved - 1;
+ }
+
+ ret = nouveau_object_new(nv_object(client), chan->handle, vram,
+ 0x003d, &args, sizeof(args), &object);
+ if (ret)
+ return ret;
+
+ if (device->card_type >= NV_50) {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_VM;
+ args.start = 0;
+ args.limit = client->vm->vmm->limit - 1;
+ } else
+ if (chan->drm->agp.stat == ENABLED) {
+ args.flags = NV_DMA_TARGET_AGP | NV_DMA_ACCESS_RDWR;
+ args.start = chan->drm->agp.base;
+ args.limit = chan->drm->agp.base +
+ chan->drm->agp.size - 1;
+ } else {
+ args.flags = NV_DMA_TARGET_VM | NV_DMA_ACCESS_RDWR;
+ args.start = 0;
+ args.limit = vmm->limit - 1;
+ }
+
+ ret = nouveau_object_new(nv_object(client), chan->handle, gart,
+ 0x003d, &args, sizeof(args), &object);
+ if (ret)
+ return ret;
+ }
+
+ /* initialise dma tracking parameters */
+ switch (nv_hclass(chan->object) & 0xffff) {
+ case 0x006e:
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
+ chan->dma.max = (0x10000 / 4) - 2;
+ break;
+ default:
+ chan->user_put = 0x40;
+ chan->user_get = 0x44;
+ chan->user_get_hi = 0x60;
+ chan->dma.ib_base = 0x10000 / 4;
+ chan->dma.ib_max = (0x02000 / 8) - 1;
+ chan->dma.ib_put = 0;
+ chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+ chan->dma.max = chan->dma.ib_base;
+ break;
+ }
+
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+
+ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(chan, 0x00000000);
+
+ /* allocate software object class (used for fences on <= nv05, and
+ * to signal flip completion), bind it to a subchannel.
+ */
+ ret = nouveau_object_new(nv_object(client), chan->handle,
+ NvSw, nouveau_abi16_swclass(chan->drm),
+ NULL, 0, &object);
+ if (ret)
+ return ret;
+
+ swch = (void *)object->parent;
+ swch->flip = nouveau_flip_complete;
+ swch->flip_data = chan;
+
+ if (device->card_type < NV_C0) {
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+
+ BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
+ OUT_RING (chan, NvSw);
+ FIRE_RING (chan);
+ }
+
+ /* initialise synchronisation */
+ return nouveau_fence(chan->drm)->context_new(chan);
+}
+
+int
+nouveau_channel_new(struct nouveau_drm *drm, struct nouveau_cli *cli,
+ u32 parent, u32 handle, u32 vram, u32 gart,
+ struct nouveau_channel **pchan)
+{
+ int ret;
+
+ ret = nouveau_channel_ind(drm, cli, parent, handle, pchan);
+ if (ret) {
+ NV_DEBUG(drm, "ib channel create, %d\n", ret);
+ ret = nouveau_channel_dma(drm, cli, parent, handle, pchan);
+ if (ret) {
+ NV_DEBUG(drm, "dma channel create, %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = nouveau_channel_init(*pchan, vram, gart);
+ if (ret) {
+ NV_ERROR(drm, "channel failed to initialise, %d\n", ret);
+ nouveau_channel_del(pchan);
+ return ret;
+ }
+
+ return 0;
+}
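
Taken together, the functions in this new file form a create/idle/teardown lifecycle. A rough caller-side sketch, assuming the drm/cli pointers come from the surrounding driver context; the handle values and the example function name are placeholders, not the ones the driver actually registers:

/* illustrative sketch only */
static int example_channel_roundtrip(struct nouveau_drm *drm,
				     struct nouveau_cli *cli,
				     u32 parent, u32 handle)
{
	struct nouveau_channel *chan = NULL;
	int ret;

	/* tries an indirect (ib) channel first, then falls back to dma */
	ret = nouveau_channel_new(drm, cli, parent, handle,
				  0xbeef0201 /* vram */, 0xbeef0202 /* gart */,
				  &chan);
	if (ret)
		return ret;

	/* ... emit methods via RING_SPACE()/OUT_RING()/FIRE_RING() ... */

	ret = nouveau_channel_idle(chan);	/* fence-based drain */
	nouveau_channel_del(&chan);		/* also unwinds the pushbuf bo */
	return ret;
}
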
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
new file mode 100644
index 000000000000..0fa94244bed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -0,0 +1,47 @@
+#ifndef __NOUVEAU_CHAN_H__
+#define __NOUVEAU_CHAN_H__
+
+struct nouveau_cli;
+
+struct nouveau_channel {
+ struct nouveau_cli *cli;
+ struct nouveau_drm *drm;
+
+ u32 handle;
+ u32 vram;
+ u32 gart;
+
+ struct {
+ struct nouveau_bo *buffer;
+ struct nouveau_vma vma;
+ u32 handle;
+ } push;
+
+ /* TODO: this will be reworked in the near future */
+ bool accel_done;
+ void *fence;
+ struct {
+ int max;
+ int free;
+ int cur;
+ int put;
+ int ib_base;
+ int ib_max;
+ int ib_free;
+ int ib_put;
+ } dma;
+ u32 user_get_hi;
+ u32 user_get;
+ u32 user_put;
+
+ struct nouveau_object *object;
+};
+
+
+int nouveau_channel_new(struct nouveau_drm *, struct nouveau_cli *,
+ u32 parent, u32 handle, u32 vram, u32 gart,
+ struct nouveau_channel **);
+void nouveau_channel_del(struct nouveau_channel **);
+int nouveau_channel_idle(struct nouveau_channel *);
+
+#endif
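
The dma/ib bookkeeping fields declared above are filled in by nouveau_channel_prep() and nouveau_channel_init() earlier in this patch; for an indirect channel the numbers work out as summarised below (derived from the 0x12000 allocation and 0x02000 ilength in nouveau_channel_ind(); the layout comment itself is an editor's sketch, not code from the patch):

/*
 * indirect (ib) channel push buffer layout, 0x12000 bytes total:
 *
 *   0x00000..0x0ffff  direct command space   dma.max     = 0x10000 / 4 dwords
 *   0x10000..0x11fff  8KiB indirect ring     dma.ib_base = 0x10000 / 4
 *                                            dma.ib_max  = (0x2000 / 8) - 1 = 1023
 */
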
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
deleted file mode 100644
index 285fde8ed3e3..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * Copyright 2005-2006 Stephane Marchesin
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <nouveau_drm.h>
-#include "nouveau_dma.h"
-#include <engine/fifo.h>
-#include <core/ramht.h>
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
-
-MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
-int nouveau_vram_pushbuf;
-module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
-
-static int
-nouveau_channel_pushbuf_init(struct nouveau_channel *chan)
-{
- u32 mem = nouveau_vram_pushbuf ? TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int ret;
-
- /* allocate buffer object */
- ret = nouveau_bo_new(dev, 65536, 0, mem, 0, 0, NULL, &chan->pushbuf_bo);
- if (ret)
- goto out;
-
- ret = nouveau_bo_pin(chan->pushbuf_bo, mem);
- if (ret)
- goto out;
-
- ret = nouveau_bo_map(chan->pushbuf_bo);
- if (ret)
- goto out;
-
- /* create DMA object covering the entire memtype where the push
- * buffer resides, userspace can submit its own push buffers from
- * anywhere within the same memtype.
- */
- chan->pushbuf_base = chan->pushbuf_bo->bo.offset;
- if (dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(chan->pushbuf_bo, chan->vm,
- &chan->pushbuf_vma);
- if (ret)
- goto out;
-
- if (dev_priv->card_type < NV_C0) {
- ret = nouveau_gpuobj_dma_new(chan,
- NV_CLASS_DMA_IN_MEMORY, 0,
- (1ULL << 40),
- NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_VM,
- &chan->pushbuf);
- }
- chan->pushbuf_base = chan->pushbuf_vma.offset;
- } else
- if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_TT) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
- dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_GART,
- &chan->pushbuf);
- } else
- if (dev_priv->card_type != NV_04) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
- dev_priv->fb_available_size,
- NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_VRAM,
- &chan->pushbuf);
- } else {
- /* NV04 cmdbuf hack, from original ddx.. not sure of it's
- * exact reason for existing :) PCI access to cmdbuf in
- * VRAM.
- */
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- pci_resource_start(dev->pdev, 1),
- dev_priv->fb_available_size,
- NV_MEM_ACCESS_RO,
- NV_MEM_TARGET_PCI,
- &chan->pushbuf);
- }
-
-out:
- if (ret) {
- NV_ERROR(dev, "error initialising pushbuf: %d\n", ret);
- nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
- nouveau_gpuobj_ref(NULL, &chan->pushbuf);
- if (chan->pushbuf_bo) {
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
- }
- }
-
- return 0;
-}
-
-/* allocates and initializes a fifo for user space consumption */
-int
-nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
- struct drm_file *file_priv,
- uint32_t vram_handle, uint32_t gart_handle)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nouveau_fence_priv *fence = dev_priv->fence.func;
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- struct nouveau_channel *chan;
- unsigned long flags;
- int ret, i;
-
- /* allocate and lock channel structure */
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- chan->dev = dev;
- chan->file_priv = file_priv;
- chan->vram_handle = vram_handle;
- chan->gart_handle = gart_handle;
-
- kref_init(&chan->ref);
- atomic_set(&chan->users, 1);
- mutex_init(&chan->mutex);
- mutex_lock(&chan->mutex);
-
- /* allocate hw channel id */
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
- if ( dev_priv->card_type == NV_50 && chan->id == 0)
- continue;
-
- if (!dev_priv->channels.ptr[chan->id]) {
- nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
- break;
- }
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
- if (chan->id == pfifo->channels) {
- mutex_unlock(&chan->mutex);
- kfree(chan);
- return -ENODEV;
- }
-
- NV_DEBUG(dev, "initialising channel %d\n", chan->id);
-
- /* setup channel's memory and vm */
- ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
- if (ret) {
- NV_ERROR(dev, "gpuobj %d\n", ret);
- nouveau_channel_put(&chan);
- return ret;
- }
-
- /* Allocate space for per-channel fixed notifier memory */
- ret = nouveau_notifier_init_channel(chan);
- if (ret) {
- NV_ERROR(dev, "ntfy %d\n", ret);
- nouveau_channel_put(&chan);
- return ret;
- }
-
- /* Allocate DMA push buffer */
- ret = nouveau_channel_pushbuf_init(chan);
- if (ret) {
- NV_ERROR(dev, "pushbuf %d\n", ret);
- nouveau_channel_put(&chan);
- return ret;
- }
-
- nouveau_dma_init(chan);
- chan->user_put = 0x40;
- chan->user_get = 0x44;
- if (dev_priv->card_type >= NV_50)
- chan->user_get_hi = 0x60;
-
- /* create fifo context */
- ret = pfifo->base.context_new(chan, NVOBJ_ENGINE_FIFO);
- if (ret) {
- nouveau_channel_put(&chan);
- return ret;
- }
-
- /* Insert NOPs for NOUVEAU_DMA_SKIPS */
- ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
- if (ret) {
- nouveau_channel_put(&chan);
- return ret;
- }
-
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
- OUT_RING (chan, 0x00000000);
-
- ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
- if (ret) {
- nouveau_channel_put(&chan);
- return ret;
- }
-
- if (dev_priv->card_type < NV_C0) {
- ret = RING_SPACE(chan, 2);
- if (ret) {
- nouveau_channel_put(&chan);
- return ret;
- }
-
- BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
- OUT_RING (chan, NvSw);
- FIRE_RING (chan);
- }
-
- FIRE_RING(chan);
-
- ret = fence->context_new(chan);
- if (ret) {
- nouveau_channel_put(&chan);
- return ret;
- }
-
- nouveau_debugfs_channel_init(chan);
-
- NV_DEBUG(dev, "channel %d initialised\n", chan->id);
- if (fpriv) {
- spin_lock(&fpriv->lock);
- list_add(&chan->list, &fpriv->channels);
- spin_unlock(&fpriv->lock);
- }
- *chan_ret = chan;
- return 0;
-}
-
-struct nouveau_channel *
-nouveau_channel_get_unlocked(struct nouveau_channel *ref)
-{
- struct nouveau_channel *chan = NULL;
-
- if (likely(ref && atomic_inc_not_zero(&ref->users)))
- nouveau_channel_ref(ref, &chan);
-
- return chan;
-}
-
-struct nouveau_channel *
-nouveau_channel_get(struct drm_file *file_priv, int id)
-{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- struct nouveau_channel *chan;
-
- spin_lock(&fpriv->lock);
- list_for_each_entry(chan, &fpriv->channels, list) {
- if (chan->id == id) {
- chan = nouveau_channel_get_unlocked(chan);
- spin_unlock(&fpriv->lock);
- mutex_lock(&chan->mutex);
- return chan;
- }
- }
- spin_unlock(&fpriv->lock);
-
- return ERR_PTR(-EINVAL);
-}
-
-void
-nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan = *pchan;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fence_priv *fence = dev_priv->fence.func;
- unsigned long flags;
- int i;
-
- /* decrement the refcount, and we're done if there's still refs */
- if (likely(!atomic_dec_and_test(&chan->users))) {
- nouveau_channel_ref(NULL, pchan);
- return;
- }
-
- /* no one wants the channel anymore */
- NV_DEBUG(dev, "freeing channel %d\n", chan->id);
- nouveau_debugfs_channel_fini(chan);
-
- /* give it chance to idle */
- nouveau_channel_idle(chan);
-
- /* destroy the engine specific contexts */
- for (i = NVOBJ_ENGINE_NR - 1; i >= 0; i--) {
- if (chan->engctx[i])
- dev_priv->eng[i]->context_del(chan, i);
- }
-
- if (chan->fence)
- fence->context_del(chan);
-
- /* aside from its resources, the channel should now be dead,
- * remove it from the channel list
- */
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
- /* destroy any resources the channel owned */
- nouveau_gpuobj_ref(NULL, &chan->pushbuf);
- if (chan->pushbuf_bo) {
- nouveau_bo_vma_del(chan->pushbuf_bo, &chan->pushbuf_vma);
- nouveau_bo_unmap(chan->pushbuf_bo);
- nouveau_bo_unpin(chan->pushbuf_bo);
- nouveau_bo_ref(NULL, &chan->pushbuf_bo);
- }
- nouveau_ramht_ref(NULL, &chan->ramht, chan);
- nouveau_notifier_takedown_channel(chan);
- nouveau_gpuobj_channel_takedown(chan);
-
- nouveau_channel_ref(NULL, pchan);
-}
-
-void
-nouveau_channel_put(struct nouveau_channel **pchan)
-{
- mutex_unlock(&(*pchan)->mutex);
- nouveau_channel_put_unlocked(pchan);
-}
-
-static void
-nouveau_channel_del(struct kref *ref)
-{
- struct nouveau_channel *chan =
- container_of(ref, struct nouveau_channel, ref);
-
- kfree(chan);
-}
-
-void
-nouveau_channel_ref(struct nouveau_channel *chan,
- struct nouveau_channel **pchan)
-{
- if (chan)
- kref_get(&chan->ref);
-
- if (*pchan)
- kref_put(&(*pchan)->ref, nouveau_channel_del);
-
- *pchan = chan;
-}
-
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_fence *fence = NULL;
- int ret;
-
- ret = nouveau_fence_new(chan, &fence);
- if (!ret) {
- ret = nouveau_fence_wait(fence, false, false);
- nouveau_fence_unref(&fence);
- }
-
- if (ret)
- NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
- return ret;
-}
-
-/* cleans up all the fifos from file_priv */
-void
-nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nouveau_channel *chan;
- int i;
-
- if (!pfifo)
- return;
-
- NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
- for (i = 0; i < pfifo->channels; i++) {
- chan = nouveau_channel_get(file_priv, i);
- if (IS_ERR(chan))
- continue;
-
- list_del(&chan->list);
- atomic_dec(&chan->users);
- nouveau_channel_put(&chan);
- }
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.c b/drivers/gpu/drm/nouveau/nouveau_compat.c
index 0403f2b94fa6..3db23496dff6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.c
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.c
@@ -1,4 +1,5 @@
#include "nouveau_drm.h"
+#include "nouveau_chan.h"
#include "nouveau_compat.h"
#include <subdev/bios.h>
@@ -14,8 +15,6 @@
#include <subdev/bar.h>
#include <subdev/vm.h>
-void *nouveau_newpriv(struct drm_device *);
-
int
nvdrm_gart_init(struct drm_device *dev, u64 *base, u64 *size)
{
@@ -583,3 +582,28 @@ nvvm_lpg_shift(struct nouveau_vm *vm)
{
return vm->vmm->lpg_shift;
}
+
+u64 nvgpuobj_addr(struct nouveau_object *object)
+{
+ return nv_gpuobj(object)->addr;
+}
+
+struct drm_device *
+nouveau_drv(void *ptr)
+{
+ struct nouveau_drm *drm = ptr;
+ return drm->dev;
+}
+
+struct nouveau_channel *
+nvdrm_channel(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ return drm->channel;
+}
+
+struct mutex *
+nvchan_mutex(struct nouveau_channel *chan)
+{
+ return &chan->cli->mutex;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_compat.h b/drivers/gpu/drm/nouveau/nouveau_compat.h
index d691b2535c72..9f42d1d0f868 100644
--- a/drivers/gpu/drm/nouveau/nouveau_compat.h
+++ b/drivers/gpu/drm/nouveau/nouveau_compat.h
@@ -124,4 +124,18 @@ nvvm_spg_shift(struct nouveau_vm *);
int
nvvm_lpg_shift(struct nouveau_vm *);
+u32
+nv50_display_active_crtcs(struct drm_device *dev);
+
+u64 nvgpuobj_addr(struct nouveau_object *object);
+
+struct drm_device *
+nouveau_drv(void *drm);
+
+struct nouveau_channel *
+nvdrm_channel(struct drm_device *dev);
+
+struct mutex *
+nvchan_mutex(struct nouveau_channel *chan);
+
#endif
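
The wrappers above let not-yet-converted code reach the new channel object and its client mutex without pulling in the new headers; a hedged sketch of the call pattern (the nouveau_display.c changes below use exactly this shape), with the example function name being an assumption:

/* illustrative sketch only */
static int example_emit_locked(struct drm_device *dev)
{
	struct nouveau_channel *chan = nvdrm_channel(dev);

	if (!chan)
		return -ENODEV;

	mutex_lock(nvchan_mutex(chan));
	/* ... RING_SPACE()/OUT_RING()/FIRE_RING() against chan ... */
	mutex_unlock(nvchan_mutex(chan));
	return 0;
}
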
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
deleted file mode 100644
index 6564b547973e..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Authors:
- * Ben Skeggs <bskeggs@redhat.com>
- */
-
-#include <linux/debugfs.h>
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-
-#include <ttm/ttm_page_alloc.h>
-
-static int
-nouveau_debugfs_channel_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct nouveau_channel *chan = node->info_ent->data;
-
- seq_printf(m, "channel id : %d\n", chan->id);
-
- seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
- seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
- seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
- seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
- seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
- if (chan->dma.ib_max) {
- seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
- seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
- seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
- }
-
- seq_printf(m, "gpu fifo state:\n");
- seq_printf(m, " get: 0x%08x\n",
- nvchan_rd32(chan, chan->user_get));
- seq_printf(m, " put: 0x%08x\n",
- nvchan_rd32(chan, chan->user_put));
- if (chan->dma.ib_max) {
- seq_printf(m, " ib get: 0x%08x\n",
- nvchan_rd32(chan, 0x88));
- seq_printf(m, " ib put: 0x%08x\n",
- nvchan_rd32(chan, 0x8c));
- }
-
- return 0;
-}
-
-int
-nouveau_debugfs_channel_init(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct drm_minor *minor = chan->dev->primary;
- int ret;
-
- if (!dev_priv->debugfs.channel_root) {
- dev_priv->debugfs.channel_root =
- debugfs_create_dir("channel", minor->debugfs_root);
- if (!dev_priv->debugfs.channel_root)
- return -ENOENT;
- }
-
- snprintf(chan->debugfs.name, 32, "%d", chan->id);
- chan->debugfs.info.name = chan->debugfs.name;
- chan->debugfs.info.show = nouveau_debugfs_channel_info;
- chan->debugfs.info.driver_features = 0;
- chan->debugfs.info.data = chan;
-
- ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
- dev_priv->debugfs.channel_root,
- chan->dev->primary);
- if (ret == 0)
- chan->debugfs.active = true;
- return ret;
-}
-
-void
-nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-
- if (!chan->debugfs.active)
- return;
-
- drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
- chan->debugfs.active = false;
-
- if (chan == dev_priv->channel) {
- debugfs_remove(dev_priv->debugfs.channel_root);
- dev_priv->debugfs.channel_root = NULL;
- }
-}
-
-static int
-nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t ppci_0;
-
- ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
-
- seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
- seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
- ppci_0 & 0xffff, ppci_0 >> 16);
- return 0;
-}
-
-static int
-nouveau_debugfs_memory_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
-
- seq_printf(m, "VRAM total: %dKiB\n", (int)(nvfb_vram_size(minor->dev) >> 10));
- return 0;
-}
-
-static int
-nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
- int i;
-
- for (i = 0; i < dev_priv->vbios.length; i++)
- seq_printf(m, "%c", dev_priv->vbios.data[i]);
- return 0;
-}
-
-static int
-nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
- int ret;
-
- ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
- if (ret)
- seq_printf(m, "failed: %d", ret);
- else
- seq_printf(m, "succeeded\n");
- return 0;
-}
-
-static struct drm_info_list nouveau_debugfs_list[] = {
- { "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
- { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
- { "memory", nouveau_debugfs_memory_info, 0, NULL },
- { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
- { "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
- { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
-};
-#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
-
-int
-nouveau_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- return 0;
-}
-
-void
-nouveau_debugfs_takedown(struct drm_minor *minor)
-{
- drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
- minor);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index e0a56b277884..a60a9f51e890 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -33,10 +33,10 @@
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
#include "nouveau_connector.h"
-#include "nouveau_software.h"
-#include "nouveau_fence.h"
#include "nv50_display.h"
+#include "nouveau_fence.h"
+
#include <subdev/bios/gpio.h>
static void
@@ -260,6 +260,24 @@ nouveau_display_fini(struct drm_device *dev)
disp->fini(dev);
}
+static void
+nouveau_display_vblank_notify(void *data, int crtc)
+{
+ drm_handle_vblank(data, crtc);
+}
+
+static void
+nouveau_display_vblank_get(void *data, int crtc)
+{
+ drm_vblank_get(data, crtc);
+}
+
+static void
+nouveau_display_vblank_put(void *data, int crtc)
+{
+ drm_vblank_put(data, crtc);
+}
+
int
nouveau_display_create(struct drm_device *dev)
{
@@ -365,6 +383,10 @@ nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->card_type >= NV_D0)
+ nv_mask(dev, 0x6100c0 + (crtc * 0x800), 1, 1);
+ else
+
if (dev_priv->card_type >= NV_50)
nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
@@ -380,6 +402,9 @@ nouveau_vblank_disable(struct drm_device *dev, int crtc)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->card_type >= NV_D0)
+ nv_mask(dev, 0x6100c0 + (crtc * 0x800), 1, 0);
+ else
if (dev_priv->card_type >= NV_50)
nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
@@ -436,8 +461,8 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_fence **pfence)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct drm_device *dev = chan->dev;
+ struct drm_device *dev = nouveau_drv(chan->drm);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
int ret;
@@ -492,7 +517,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct nouveau_fence *fence;
int ret;
- if (!dev_priv->channel)
+ if (!nvdrm_channel(dev))
return -ENODEV;
s = kzalloc(sizeof(*s), GFP_KERNEL);
@@ -513,10 +538,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Choose the channel the flip will be handled in */
fence = new_bo->bo.sync_obj;
if (fence)
- chan = nouveau_channel_get_unlocked(fence->channel);
+ chan = fence->channel;
if (!chan)
- chan = nouveau_channel_get_unlocked(dev_priv->channel);
- mutex_lock(&chan->mutex);
+ chan = nvdrm_channel(dev);
+ mutex_lock(nvchan_mutex(chan));
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
@@ -525,13 +550,13 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
else
ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
- nouveau_channel_put(&chan);
+ mutex_unlock(nvchan_mutex(chan));
goto fail_unreserve;
}
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
- nouveau_channel_put(&chan);
+ mutex_unlock(nvchan_mutex(chan));
if (ret)
goto fail_unreserve;
@@ -554,14 +579,14 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
struct nouveau_fence_chan *fctx = chan->fence;
- struct drm_device *dev = chan->dev;
+ struct drm_device *dev = nouveau_drv(chan->drm);
struct nouveau_page_flip_state *s;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (list_empty(&fctx->flip)) {
- NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+ NV_ERROR(dev, "unexpected pageflip\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
@@ -592,7 +617,7 @@ int
nouveau_flip_complete(void *data)
{
struct nouveau_channel *chan = data;
- struct drm_device *dev = chan->dev;
+ struct drm_device *dev = nouveau_drv(chan->drm);
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_page_flip_state state;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index efd082323405..40f91e1e5842 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -24,41 +24,16 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
-#include <core/ramht.h>
-
-void
-nouveau_dma_init(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_bo *pushbuf = chan->pushbuf_bo;
-
- if (dev_priv->card_type >= NV_50) {
- const int ib_size = pushbuf->bo.mem.size / 2;
-
- chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
- chan->dma.ib_max = (ib_size / 8) - 1;
- chan->dma.ib_put = 0;
- chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+#include <core/client.h>
- chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
- } else {
- chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
- }
-
- chan->dma.put = 0;
- chan->dma.cur = chan->dma.put;
- chan->dma.free = chan->dma.max - chan->dma.cur;
-}
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
bool is_iomem;
- u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
+ u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
mem = &mem[chan->dma.cur];
if (is_iomem)
memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
@@ -79,9 +54,9 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
uint64_t val;
- val = nvchan_rd32(chan, chan->user_get);
+ val = nv_ro32(chan->object, chan->user_get);
if (chan->user_get_hi)
- val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
+ val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -93,32 +68,33 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
}
if ((++*timeout & 0xff) == 0) {
- DRM_UDELAY(1);
+ udelay(1);
if (*timeout > 100000)
return -EBUSY;
}
- if (val < chan->pushbuf_base ||
- val > chan->pushbuf_base + (chan->dma.max << 2))
+ if (val < chan->push.vma.offset ||
+ val > chan->push.vma.offset + (chan->dma.max << 2))
return -EINVAL;
- return (val - chan->pushbuf_base) >> 2;
+ return (val - chan->push.vma.offset) >> 2;
}
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
int delta, int length)
{
- struct nouveau_bo *pb = chan->pushbuf_bo;
+ struct nouveau_bo *pb = chan->push.buffer;
struct nouveau_vma *vma;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
u64 offset;
- vma = nouveau_bo_vma_find(bo, chan->vm);
+ vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
BUG_ON(!vma);
offset = vma->offset + delta;
BUG_ON(chan->dma.ib_free < 1);
+
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
@@ -128,7 +104,7 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
/* Flush writes. */
nouveau_bo_rd32(pb, 0);
- nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+ nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}
@@ -138,7 +114,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
uint32_t cnt = 0, prev_get = 0;
while (chan->dma.ib_free < count) {
- uint32_t get = nvchan_rd32(chan, 0x88);
+ uint32_t get = nv_ro32(chan->object, 0x88);
if (get != prev_get) {
prev_get = get;
cnt = 0;
@@ -249,7 +225,7 @@ nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
* instruct the GPU to jump back to the start right
* after processing the currently pending commands.
*/
- OUT_RING(chan, chan->pushbuf_base | 0x20000000);
+ OUT_RING(chan, chan->push.vma.offset | 0x20000000);
/* wait for GET to depart from the skips area.
* prevents writing GET==PUT and causing a race
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 8db68be9544f..5c2e22932d1c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -27,10 +27,10 @@
#ifndef __NOUVEAU_DMA_H__
#define __NOUVEAU_DMA_H__
-#ifndef NOUVEAU_DMA_DEBUG
-#define NOUVEAU_DMA_DEBUG 0
-#endif
+#include "nouveau_bo.h"
+#include "nouveau_chan.h"
+int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
int delta, int length);
@@ -116,12 +116,7 @@ RING_SPACE(struct nouveau_channel *chan, int size)
static inline void
OUT_RING(struct nouveau_channel *chan, int data)
{
- if (NOUVEAU_DMA_DEBUG) {
- NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
- chan->id, chan->dma.cur << 2, data);
- }
-
- nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
+ nouveau_bo_wr32(chan->push.buffer, chan->dma.cur++, data);
}
extern void
@@ -159,24 +154,19 @@ BEGIN_IMC0(struct nouveau_channel *chan, int subc, int mthd, u16 data)
#define WRITE_PUT(val) do { \
DRM_MEMORYBARRIER(); \
- nouveau_bo_rd32(chan->pushbuf_bo, 0); \
- nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
+ nouveau_bo_rd32(chan->push.buffer, 0); \
+ nv_wo32(chan->object, chan->user_put, ((val) << 2) + chan->push.vma.offset); \
} while (0)
static inline void
FIRE_RING(struct nouveau_channel *chan)
{
- if (NOUVEAU_DMA_DEBUG) {
- NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
- chan->id, chan->dma.cur << 2);
- }
-
if (chan->dma.cur == chan->dma.put)
return;
chan->accel_done = true;
if (chan->dma.ib_max) {
- nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+ nv50_dma_push(chan, chan->push.buffer, chan->dma.put << 2,
(chan->dma.cur - chan->dma.put) << 2);
} else {
WRITE_PUT(chan->dma.cur);
@@ -191,4 +181,31 @@ WIND_RING(struct nouveau_channel *chan)
chan->dma.cur = chan->dma.put;
}
+/* FIFO methods */
+#define NV01_SUBCHAN_OBJECT 0x00000000
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH 0x00000010
+#define NV84_SUBCHAN_SEMAPHORE_ADDRESS_LOW 0x00000014
+#define NV84_SUBCHAN_SEMAPHORE_SEQUENCE 0x00000018
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER 0x0000001c
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL 0x00000001
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
+#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
+#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020
+#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
+#define NV10_SUBCHAN_REF_CNT 0x00000050
+#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
+#define NV11_SUBCHAN_DMA_SEMAPHORE 0x00000060
+#define NV11_SUBCHAN_SEMAPHORE_OFFSET 0x00000064
+#define NV11_SUBCHAN_SEMAPHORE_ACQUIRE 0x00000068
+#define NV11_SUBCHAN_SEMAPHORE_RELEASE 0x0000006c
+#define NV40_SUBCHAN_YIELD 0x00000080
+
+/* NV_SW object class */
+#define NV_SW_DMA_VBLSEM 0x0000018c
+#define NV_SW_VBLSEM_OFFSET 0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV_SW_VBLSEM_RELEASE 0x00000408
+#define NV_SW_PAGE_FLIP 0x00000500
+
#endif
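A minimal sketch (illustrative only, not part of this patch) of how the NV84_SUBCHAN_SEMAPHORE_* methods added above are driven through the RING_SPACE()/OUT_RING()/FIRE_RING() helpers in this header. The BEGIN_NV04() method-header macro and the use of subchannel 0 are assumptions here, and example_semaphore_write() is a hypothetical name.

/* Sketch only: write a 32-bit sequence value to a semaphore in VRAM. */
static int
example_semaphore_write(struct nouveau_channel *chan, u64 addr, u32 seq)
{
	int ret = RING_SPACE(chan, 5);		/* 1 method header + 4 data words */
	if (ret)
		return ret;

	BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
	OUT_RING (chan, upper_32_bits(addr));	/* ADDRESS_HIGH */
	OUT_RING (chan, lower_32_bits(addr));	/* ADDRESS_LOW */
	OUT_RING (chan, seq);			/* SEQUENCE */
	OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
	FIRE_RING(chan);
	return 0;
}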
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3b4e65d5122b..92ecf50a39d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -27,12 +27,20 @@
#include <core/device.h>
#include <core/client.h>
+#include <core/gpuobj.h>
#include <core/class.h>
#include <subdev/device.h>
+#include <subdev/vm.h>
#include "nouveau_drm.h"
+#include "nouveau_dma.h"
#include "nouveau_agp.h"
+#include "nouveau_abi16.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_fence.h"
+
+#include "nouveau_ttm.h"
int __devinit nouveau_pci_probe(struct pci_dev *, const struct pci_device_id *);
void nouveau_pci_remove(struct pci_dev *);
@@ -43,7 +51,6 @@ void __exit nouveau_exit(struct pci_driver *);
int nouveau_load(struct drm_device *, unsigned long);
int nouveau_unload(struct drm_device *);
-void *nouveau_newpriv(struct drm_device *);
MODULE_PARM_DESC(config, "option string to pass to driver core");
static char *nouveau_config;
@@ -53,6 +60,10 @@ MODULE_PARM_DESC(debug, "debug string to pass to driver core");
static char *nouveau_debug;
module_param_named(debug, nouveau_debug, charp, 0400);
+MODULE_PARM_DESC(noaccel, "disable kernel/abi16 acceleration");
+static int nouveau_noaccel = 0;
+module_param_named(noaccel, nouveau_noaccel, int, 0400);
+
static u64
nouveau_name(struct pci_dev *pdev)
{
@@ -82,17 +93,112 @@ static void
nouveau_cli_destroy(struct nouveau_cli *cli)
{
struct nouveau_object *client = nv_object(cli);
+ nouveau_vm_ref(NULL, &cli->base.vm, NULL);
nouveau_client_fini(&cli->base, false);
atomic_set(&client->refcount, 1);
nouveau_object_ref(NULL, &client);
}
+static void
+nouveau_accel_fini(struct nouveau_drm *drm)
+{
+ nouveau_gpuobj_ref(NULL, &drm->notify);
+ nouveau_channel_del(&drm->channel);
+ if (drm->fence)
+ nouveau_fence(drm)->dtor(drm);
+}
+
+static void
+nouveau_accel_init(struct nouveau_drm *drm)
+{
+ struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_object *object;
+ int ret;
+
+ if (nouveau_noaccel)
+ return;
+
+ /* initialise synchronisation routines */
+ if (device->card_type < NV_10) ret = nv04_fence_create(drm);
+ else if (device->chipset < 0x84) ret = nv10_fence_create(drm);
+ else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
+ else ret = nvc0_fence_create(drm);
+ if (ret) {
+ NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
+ nouveau_accel_fini(drm);
+ return;
+ }
+
+ ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
+ NvDmaFB, NvDmaTT, &drm->channel);
+ if (ret) {
+ NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
+ nouveau_accel_fini(drm);
+ return;
+ }
+
+ if (device->card_type < NV_C0) {
+ ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
+ &drm->notify);
+ if (ret) {
+ NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
+ nouveau_accel_fini(drm);
+ return;
+ }
+
+ ret = nouveau_object_new(nv_object(drm),
+ drm->channel->handle, NvNotify0,
+ 0x003d, &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = drm->notify->addr,
+ .limit = drm->notify->addr + 31
+ }, sizeof(struct nv_dma_class),
+ &object);
+ if (ret) {
+ nouveau_accel_fini(drm);
+ return;
+ }
+ }
+
+
+ nouveau_bo_move_init(drm->channel);
+}
+
static int __devinit
nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
{
struct nouveau_device *device;
+ struct apertures_struct *aper;
+ bool boot = false;
int ret;
+ /* remove conflicting drivers (vesafb, efifb etc) */
+ aper = alloc_apertures(3);
+ if (!aper)
+ return -ENOMEM;
+
+ aper->ranges[0].base = pci_resource_start(pdev, 1);
+ aper->ranges[0].size = pci_resource_len(pdev, 1);
+ aper->count = 1;
+
+ if (pci_resource_len(pdev, 2)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
+ aper->count++;
+ }
+
+ if (pci_resource_len(pdev, 3)) {
+ aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
+ aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
+ aper->count++;
+ }
+
+#ifdef CONFIG_X86
+ boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(aper, "nouveaufb", boot);
+
ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
nouveau_config, nouveau_debug, &device);
if (ret)
@@ -102,7 +208,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
ret = nouveau_pci_probe(pdev, pent);
if (ret) {
- nouveau_device_destroy(&device);
+ nouveau_object_ref(NULL, (struct nouveau_object **)&device);
return ret;
}
@@ -113,6 +219,7 @@ int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = dev->pdev;
+ struct nouveau_device *device;
struct nouveau_drm *drm;
int ret;
@@ -122,6 +229,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
return ret;
INIT_LIST_HEAD(&drm->clients);
+ spin_lock_init(&drm->tile.lock);
drm->dev = dev;
/* make sure AGP controller is in a consistent state before we
@@ -142,7 +250,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
}, sizeof(struct nv_device_class),
&drm->device);
if (ret)
- return ret;
+ goto fail_device;
nouveau_agp_reset(drm);
nouveau_object_del(nv_object(drm), NVDRM_CLIENT, NVDRM_DEVICE);
@@ -158,15 +266,32 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
+ device = nv_device(drm->device);
+
/* initialise AGP */
nouveau_agp_init(drm);
- ret = nouveau_load(dev, flags);
+ if (device->card_type >= NV_50) {
+ ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+ 0x1000, &drm->client.base.vm);
+ if (ret)
+ goto fail_device;
+ }
+
+ ret = nouveau_ttm_init(drm);
if (ret)
goto fail_device;
+ ret = nouveau_load(dev, flags);
+ if (ret)
+ goto fail_load;
+
+ nouveau_accel_init(drm);
+ nouveau_fbcon_init(dev);
return 0;
+fail_load:
+ nouveau_ttm_fini(drm);
fail_device:
nouveau_cli_destroy(&drm->client);
return ret;
@@ -179,10 +304,14 @@ nouveau_drm_unload(struct drm_device *dev)
struct pci_dev *pdev = dev->pdev;
int ret;
+ nouveau_fbcon_fini(dev);
+ nouveau_accel_fini(drm);
+
ret = nouveau_unload(dev);
if (ret)
return ret;
+ nouveau_ttm_fini(drm);
nouveau_agp_fini(drm);
pci_set_drvdata(pdev, drm->client.base.device);
@@ -193,10 +322,11 @@ nouveau_drm_unload(struct drm_device *dev)
static void
nouveau_drm_remove(struct pci_dev *pdev)
{
- struct nouveau_device *device;
+ struct nouveau_object *device;
nouveau_pci_remove(pdev);
device = pci_get_drvdata(pdev);
- nouveau_device_destroy(&device);
+ nouveau_object_ref(NULL, &device);
+ nouveau_object_debug();
}
int
@@ -211,10 +341,23 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pm_state.event == PM_EVENT_PRETHAW)
return 0;
+ NV_INFO(drm, "suspending fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
+
+ NV_INFO(drm, "suspending drm...\n");
ret = nouveau_pci_suspend(pdev, pm_state);
if (ret)
return ret;
+ NV_INFO(drm, "evicting buffers...\n");
+ ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+ if (drm->fence && nouveau_fence(drm)->suspend) {
+ if (!nouveau_fence(drm)->suspend(drm))
+ return -ENOMEM;
+ }
+
+ NV_INFO(drm, "suspending client object trees...\n");
list_for_each_entry(cli, &drm->clients, head) {
ret = nouveau_client_fini(&cli->base, true);
if (ret)
@@ -255,6 +398,7 @@ nouveau_drm_resume(struct pci_dev *pdev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ NV_INFO(drm, "re-enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
ret = pci_enable_device(pdev);
@@ -264,17 +408,70 @@ nouveau_drm_resume(struct pci_dev *pdev)
nouveau_agp_reset(drm);
+ NV_INFO(drm, "resuming client object trees...\n");
nouveau_client_init(&drm->client.base);
+ nouveau_agp_init(drm);
list_for_each_entry(cli, &drm->clients, head) {
nouveau_client_init(&cli->base);
}
- nouveau_agp_init(drm);
+ if (drm->fence && nouveau_fence(drm)->resume)
+ nouveau_fence(drm)->resume(drm);
return nouveau_pci_resume(pdev);
}
+int
+nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct pci_dev *pdev = dev->pdev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_cli *cli;
+ int ret;
+
+ ret = nouveau_cli_create(pdev, fpriv->pid, sizeof(*cli), (void **)&cli);
+ if (ret)
+ return ret;
+
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
+ 0x1000, &cli->base.vm);
+ if (ret) {
+ nouveau_cli_destroy(cli);
+ return ret;
+ }
+ }
+
+ fpriv->driver_priv = cli;
+
+ mutex_lock(&drm->client.mutex);
+ list_add(&cli->head, &drm->clients);
+ mutex_unlock(&drm->client.mutex);
+ return 0;
+}
+
+void
+nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct nouveau_cli *cli = nouveau_cli(fpriv);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (cli->abi16)
+ nouveau_abi16_fini(cli->abi16);
+
+ mutex_lock(&drm->client.mutex);
+ list_del(&cli->head);
+ mutex_unlock(&drm->client.mutex);
+}
+
+void
+nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
+{
+ struct nouveau_cli *cli = nouveau_cli(fpriv);
+ nouveau_cli_destroy(cli);
+}
+
static struct pci_device_id
nouveau_drm_pci_table[] = {
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 87698067244b..2e3364d50ca0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -3,20 +3,50 @@
#include <core/client.h>
+#include <subdev/vm.h>
+
#include <drmP.h>
#include <drm/nouveau_drm.h>
+#include "ttm/ttm_bo_api.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include "ttm/ttm_memory.h"
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_page_alloc.h"
+
+struct nouveau_channel;
+
+#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
+
+#include "nouveau_revcompat.h"
+#include "nouveau_fence.h"
+
+struct nouveau_drm_tile {
+ struct nouveau_fence *fence;
+ bool used;
+};
+
enum nouveau_drm_handle {
NVDRM_CLIENT = 0xffffffff,
NVDRM_DEVICE = 0xdddddddd,
+ NVDRM_PUSH = 0xbbbb0000, /* |= client chid */
+ NVDRM_CHAN = 0xcccc0000, /* |= client chid */
};
struct nouveau_cli {
struct nouveau_client base;
struct list_head head;
struct mutex mutex;
+ void *abi16;
};
+static inline struct nouveau_cli *
+nouveau_cli(struct drm_file *fpriv)
+{
+ return fpriv ? fpriv->driver_priv : NULL;
+}
+
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
@@ -33,8 +63,46 @@ struct nouveau_drm {
u32 base;
u32 size;
} agp;
+
+ /* TTM interface support */
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+ struct ttm_bo_device bdev;
+ atomic_t validate_sequence;
+ int (*move)(struct nouveau_channel *,
+ struct ttm_buffer_object *,
+ struct ttm_mem_reg *, struct ttm_mem_reg *);
+ int mtrr;
+ } ttm;
+
+ /* GEM interface support */
+ struct {
+ u64 vram_available;
+ u64 gart_available;
+ } gem;
+
+ /* synchronisation */
+ void *fence;
+
+ /* context for accelerated drm-internal operations */
+ struct nouveau_channel *channel;
+ struct nouveau_gpuobj *notify;
+ struct nouveau_fbdev *fbcon;
+
+ /* nv10-nv40 tiling regions */
+ struct {
+ struct nouveau_drm_tile reg[15];
+ spinlock_t lock;
+ } tile;
};
+static inline struct nouveau_drm *
+nouveau_drm(struct drm_device *dev)
+{
+ return nouveau_newpriv(dev);
+}
+
int nouveau_drm_suspend(struct pci_dev *, pm_message_t);
int nouveau_drm_resume(struct pci_dev *);
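A small sketch (illustrative only) of how the new per-device and per-client accessors declared above are meant to be used from a DRM entry point; every identifier below except the hypothetical example_* helper appears in this header or in the hunks above.

/* Sketch only: fetch driver and client state through the new accessors. */
static bool
example_client_has_vm(struct drm_device *dev, struct drm_file *fpriv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);	/* per-device state */
	struct nouveau_cli *cli = nouveau_cli(fpriv);	/* per-open-file client */

	return nv_device(drm->device)->card_type >= NV_50 &&
	       cli && cli->base.vm != NULL;
}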
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index db150d9e0cd4..2294cb8848f7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,7 +35,6 @@
#include "nouveau_fbcon.h"
#include "nouveau_fence.h"
#include "nouveau_pm.h"
-#include <engine/fifo.h>
#include "nv50_display.h"
#include "drm_pciids.h"
@@ -68,14 +67,6 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
-MODULE_PARM_DESC(noaccel, "Disable all acceleration");
-int nouveau_noaccel = -1;
-module_param_named(noaccel, nouveau_noaccel, int, 0400);
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
MODULE_PARM_DESC(force_post, "Force POST");
int nouveau_force_post = 0;
module_param_named(force_post, nouveau_force_post, int, 0400);
@@ -148,19 +139,11 @@ int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nouveau_fence_priv *fence = dev_priv->fence.func;
- struct nouveau_channel *chan;
struct drm_crtc *crtc;
- int ret, i, e;
NV_INFO(dev, "Disabling display...\n");
nouveau_display_fini(dev);
- NV_INFO(dev, "Disabling fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
-
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -179,74 +162,23 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
}
- NV_INFO(dev, "Evicting buffers...\n");
- ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
-
- NV_INFO(dev, "Idling channels...\n");
- for (i = 0; i < (pfifo ? pfifo->channels : 0); i++) {
- chan = dev_priv->channels.ptr[i];
-
- if (chan && chan->pushbuf_bo)
- nouveau_channel_idle(chan);
- }
-
- if (fence->suspend) {
- if (!fence->suspend(dev))
- return -ENOMEM;
- }
-
- for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
- if (!dev_priv->eng[e])
- continue;
-
- ret = dev_priv->eng[e]->fini(dev, e, true);
- if (ret) {
- NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
- goto out_abort;
- }
- }
-
return 0;
-
-out_abort:
- NV_INFO(dev, "Re-enabling acceleration..\n");
- for (e = e + 1; e < NVOBJ_ENGINE_NR; e++) {
- if (dev_priv->eng[e])
- dev_priv->eng[e]->init(dev, e);
- }
- return ret;
}
int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fence_priv *fence = dev_priv->fence.func;
- struct nouveau_engine *engine = &dev_priv->engine;
struct drm_crtc *crtc;
- int ret, i;
-
- /* Make the CRTCs accessible */
- engine->display.early_init(dev);
+ int ret;
- NV_INFO(dev, "POSTing device...\n");
ret = nouveau_run_vbios_init(dev);
if (ret)
return ret;
- NV_INFO(dev, "Reinitialising engines...\n");
- for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
- if (dev_priv->eng[i])
- dev_priv->eng[i]->init(dev, i);
- }
-
- if (fence->resume)
- fence->resume(dev);
-
nouveau_irq_postinstall(dev);
+#if 0
/* Re-write SKIPS, they'll have been lost over the suspend */
if (nouveau_vram_pushbuf) {
struct nouveau_channel *chan;
@@ -261,6 +193,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
nouveau_bo_wr32(chan->pushbuf_bo, i, 0);
}
}
+#endif
nouveau_pm_resume(dev);
@@ -343,6 +276,9 @@ static const struct file_operations nouveau_driver_fops = {
int nouveau_drm_load(struct drm_device *, unsigned long);
int nouveau_drm_unload(struct drm_device *);
+int nouveau_drm_open(struct drm_device *, struct drm_file *);
+void nouveau_drm_preclose(struct drm_device *dev, struct drm_file *);
+void nouveau_drm_postclose(struct drm_device *, struct drm_file *);
static struct drm_driver driver = {
.driver_features =
@@ -353,13 +289,9 @@ static struct drm_driver driver = {
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
.unload = nouveau_drm_unload,
- .open = nouveau_open,
- .preclose = nouveau_preclose,
- .postclose = nouveau_postclose,
-#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
- .debugfs_init = nouveau_debugfs_init,
- .debugfs_cleanup = nouveau_debugfs_takedown,
-#endif
+ .open = nouveau_drm_open,
+ .preclose = nouveau_drm_preclose,
+ .postclose = nouveau_drm_postclose,
.irq_preinstall = nouveau_irq_preinstall,
.irq_postinstall = nouveau_irq_postinstall,
.irq_uninstall = nouveau_irq_uninstall,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 08ce60be3f3c..f1cce652a2a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -64,24 +64,11 @@ enum blah {
NV_MEM_TYPE_GDDR5
};
-struct nouveau_fpriv {
- spinlock_t lock;
- struct list_head channels;
- struct nouveau_vm *vm;
-};
-
-static inline struct nouveau_fpriv *
-nouveau_fpriv(struct drm_file *file_priv)
-{
- return file_priv ? file_priv->driver_priv : NULL;
-}
-
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
#include <nouveau_drm.h>
#include "nouveau_reg.h"
#include <nouveau_bios.h>
-#include "nouveau_util.h"
struct nouveau_grctx;
struct nouveau_mem;
@@ -90,8 +77,7 @@ struct nouveau_mem;
#include "nouveau_compat.h"
#define nouveau_gpuobj_new(d,c,s,a,f,o) \
- _nouveau_gpuobj_new((d), (c) ? ((struct nouveau_channel *)(c))->ramin : NULL, \
- (s), (a), (f), (o))
+ _nouveau_gpuobj_new((d), NULL, (s), (a), (f), (o))
#define nouveau_vm_new(d,o,l,m,v) \
_nouveau_vm_new((d), (o), (l), (m), (v))
@@ -102,40 +88,15 @@ struct nouveau_mem;
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 4096
-#define NOUVEAU_MAX_TILE_NR 15
#include "nouveau_bo.h"
#include "nouveau_gem.h"
-/* TODO: submit equivalent to TTM generic API upstream? */
-static inline void __iomem *
-nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
-{
- bool is_iomem;
- void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
- &nvbo->kmap, &is_iomem);
- WARN_ON_ONCE(ioptr && !is_iomem);
- return ioptr;
-}
-
enum nouveau_flags {
NV_NFORCE = 0x10000000,
NV_NFORCE2 = 0x20000000
};
-#define NVOBJ_ENGINE_SW 0
-#define NVOBJ_ENGINE_GR 1
-#define NVOBJ_ENGINE_CRYPT 2
-#define NVOBJ_ENGINE_COPY0 3
-#define NVOBJ_ENGINE_COPY1 4
-#define NVOBJ_ENGINE_MPEG 5
-#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
-#define NVOBJ_ENGINE_BSP 6
-#define NVOBJ_ENGINE_VP 7
-#define NVOBJ_ENGINE_FIFO 14
-#define NVOBJ_ENGINE_NR 16
-#define NVOBJ_ENGINE_DISPLAY (NVOBJ_ENGINE_NR + 0) /*XXX*/
-
struct nouveau_page_flip_state {
struct list_head head;
struct drm_pending_vblank_event *event;
@@ -148,95 +109,6 @@ enum nouveau_channel_mutex_class {
NOUVEAU_KCHANNEL_MUTEX
};
-struct nouveau_channel {
- struct drm_device *dev;
- struct list_head list;
- int id;
-
- /* references to the channel data structure */
- struct kref ref;
- /* users of the hardware channel resources, the hardware
- * context will be kicked off when it reaches zero. */
- atomic_t users;
- struct mutex mutex;
-
- /* owner of this fifo */
- struct drm_file *file_priv;
- /* mapping of the fifo itself */
- struct drm_local_map *map;
-
- /* mapping of the regs controlling the fifo */
- void __iomem *user;
- uint32_t user_get;
- uint32_t user_get_hi;
- uint32_t user_put;
-
- /* DMA push buffer */
- struct nouveau_gpuobj *pushbuf;
- struct nouveau_bo *pushbuf_bo;
- struct nouveau_vma pushbuf_vma;
- uint64_t pushbuf_base;
-
- /* Notifier memory */
- struct nouveau_bo *notifier_bo;
- struct nouveau_vma notifier_vma;
- struct drm_mm notifier_heap;
-
- /* PFIFO context */
- struct nouveau_gpuobj *engptr;
- struct nouveau_gpuobj *ramfc;
-
- /* Execution engine contexts */
- void *engctx[NVOBJ_ENGINE_NR];
- void *fence;
-
- /* NV50 VM */
- struct nouveau_vm *vm;
- struct nouveau_gpuobj *vm_pd;
-
- /* Objects */
- struct nouveau_gpuobj *ramin; /* Private instmem */
- struct nouveau_ramht *ramht; /* Hash table */
-
- /* GPU object info for stuff used in-kernel (mm_enabled) */
- uint32_t m2mf_ntfy;
- uint32_t vram_handle;
- uint32_t gart_handle;
- bool accel_done;
-
- /* Push buffer state (only for drm's channel on !mm_enabled) */
- struct {
- int max;
- int free;
- int cur;
- int put;
- /* access via pushbuf_bo */
-
- int ib_base;
- int ib_max;
- int ib_free;
- int ib_put;
- } dma;
-
- struct {
- bool active;
- char name[32];
- struct drm_info_list info;
- } debugfs;
-};
-
-struct nouveau_exec_engine {
- void (*destroy)(struct drm_device *, int engine);
- int (*init)(struct drm_device *, int engine);
- int (*fini)(struct drm_device *, int engine, bool suspend);
- int (*context_new)(struct nouveau_channel *, int engine);
- void (*context_del)(struct nouveau_channel *, int engine);
- int (*object_new)(struct nouveau_channel *, int engine,
- u32 handle, u16 class);
- void (*set_tile_region)(struct drm_device *dev, int i);
- void (*tlb_flush)(struct drm_device *, int engine);
-};
-
struct nouveau_display_engine {
void *priv;
int (*early_init)(struct drm_device *);
@@ -434,6 +306,8 @@ enum nouveau_card_type {
NV_E0 = 0xe0,
};
+struct nouveau_channel;
+
struct drm_nouveau_private {
struct drm_device *dev;
bool noaccel;
@@ -447,92 +321,29 @@ struct drm_nouveau_private {
int flags;
u32 crystal;
- struct nouveau_exec_engine *eng[NVOBJ_ENGINE_NR];
-
- struct list_head classes;
-
struct nouveau_bo *vga_ram;
/* interrupt handling */
void (*irq_handler[32])(struct drm_device *);
bool msi_enabled;
- struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
- struct ttm_bo_device bdev;
- atomic_t validate_sequence;
- int (*move)(struct nouveau_channel *,
- struct ttm_buffer_object *,
- struct ttm_mem_reg *, struct ttm_mem_reg *);
- } ttm;
-
- struct {
- void *func;
- spinlock_t lock;
- struct drm_mm heap;
- struct nouveau_bo *bo;
- } fence;
-
- struct {
- spinlock_t lock;
- struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
- } channels;
-
struct nouveau_engine engine;
- struct nouveau_channel *channel;
/* For PFIFO and PGRAPH. */
spinlock_t context_switch_lock;
- /* VM/PRAMIN flush, legacy PRAMIN aperture */
- spinlock_t vm_lock;
-
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_ramht *ramht;
- struct {
- enum {
- NOUVEAU_GART_NONE = 0,
- NOUVEAU_GART_AGP, /* AGP */
- NOUVEAU_GART_PDMA, /* paged dma object */
- NOUVEAU_GART_HW /* on-chip gart/vm */
- } type;
- uint64_t aper_base;
- uint64_t aper_size;
- uint64_t aper_free;
-
- struct ttm_backend_func *func;
-
- struct nouveau_gpuobj *sg_ctxdma;
- } gart_info;
-
- /* nv10-nv40 tiling regions */
- struct {
- struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
- spinlock_t lock;
- } tile;
-
uint64_t fb_available_size;
uint64_t fb_mappable_pages;
- uint64_t fb_aper_free;
int fb_mtrr;
- /* G8x/G9x virtual address space */
- struct nouveau_vm *chan_vm;
-
struct nvbios vbios;
u8 *mxms;
struct list_head i2c_ports;
struct backlight_device *backlight;
-
- struct {
- struct dentry *channel_root;
- } debugfs;
-
- struct nouveau_fbdev *nfbdev;
- struct apertures_struct *apertures;
};
static inline struct drm_nouveau_private *
@@ -541,12 +352,6 @@ nouveau_private(struct drm_device *dev)
return dev->dev_private;
}
-static inline struct drm_nouveau_private *
-nouveau_bdev(struct ttm_bo_device *bd)
-{
- return container_of(bd, struct drm_nouveau_private, ttm.bdev);
-}
-
/* nouveau_drv.c */
extern int nouveau_modeset;
extern int nouveau_duallink;
@@ -560,7 +365,6 @@ extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern int nouveau_ignorelid;
-extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
extern int nouveau_force_post;
extern int nouveau_override_conntype;
@@ -574,9 +378,6 @@ extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
/* nouveau_state.c */
-extern int nouveau_open(struct drm_device *, struct drm_file *);
-extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
-extern void nouveau_postclose(struct drm_device *, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
extern int nouveau_firstopen(struct drm_device *);
extern void nouveau_lastclose(struct drm_device *);
@@ -596,76 +397,16 @@ extern int nouveau_mem_timing_calc(struct drm_device *, u32 freq,
extern void nouveau_mem_timing_read(struct drm_device *,
struct nouveau_pm_memtiming *);
extern int nouveau_mem_vbios_type(struct drm_device *);
-extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
-extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
-extern const struct ttm_mem_type_manager_func nv04_gart_manager;
-
-/* nouveau_notifier.c */
-extern int nouveau_notifier_init_channel(struct nouveau_channel *);
-extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
-extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
- int cout, uint32_t start, uint32_t end,
- uint32_t *offset);
-
-/* nouveau_channel.c */
-extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int nouveau_channel_alloc(struct drm_device *dev,
- struct nouveau_channel **chan,
- struct drm_file *file_priv,
- uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern struct nouveau_channel *
-nouveau_channel_get_unlocked(struct nouveau_channel *);
-extern struct nouveau_channel *
-nouveau_channel_get(struct drm_file *, int id);
-extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
-extern void nouveau_channel_put(struct nouveau_channel **);
-extern void nouveau_channel_ref(struct nouveau_channel *chan,
- struct nouveau_channel **pchan);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+ struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
+
extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_gpuobj.c */
-#define NVOBJ_ENGINE_ADD(d, e, p) do { \
- struct drm_nouveau_private *dev_priv = (d)->dev_private; \
- dev_priv->eng[NVOBJ_ENGINE_##e] = (p); \
-} while (0)
-
-#define NVOBJ_ENGINE_DEL(d, e) do { \
- struct drm_nouveau_private *dev_priv = (d)->dev_private; \
- dev_priv->eng[NVOBJ_ENGINE_##e] = NULL; \
-} while (0)
-
-#define NVOBJ_CLASS(d, c, e) do { \
- int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e); \
- if (ret) \
- return ret; \
-} while (0)
-
-#define NVOBJ_MTHD(d, c, m, e) do { \
- int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e)); \
- if (ret) \
- return ret; \
-} while (0)
-
-extern int nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
-extern int nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
- int (*exec)(struct nouveau_channel *,
- u32 class, u32 mthd, u32 data));
-extern int nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
-extern int nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
-extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
- uint32_t vram_h, uint32_t tt_h);
-extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
-extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
- uint64_t offset, uint64_t size, int access,
- int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
-extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
- u64 size, int target, int access, u32 type,
- u32 comp, struct nouveau_gpuobj **pobj);
-extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
- int class, u64 base, u64 size, int target,
- int access, u32 type, u32 comp);
-
int nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
u32 flags, struct nouveau_vma *vma);
void nouveau_gpuobj_unmap(struct nouveau_vma *vma);
@@ -681,49 +422,6 @@ extern void nouveau_irq_preinstall(struct drm_device *);
extern int nouveau_irq_postinstall(struct drm_device *);
extern void nouveau_irq_uninstall(struct drm_device *);
-/* nouveau_sgdma.c */
-extern int nouveau_sgdma_init(struct drm_device *);
-extern void nouveau_sgdma_takedown(struct drm_device *);
-extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
- uint32_t offset);
-extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
- unsigned long size,
- uint32_t page_flags,
- struct page *dummy_read_page);
-
-/* nouveau_debugfs.c */
-#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
-extern int nouveau_debugfs_init(struct drm_minor *);
-extern void nouveau_debugfs_takedown(struct drm_minor *);
-extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
-extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
-#else
-static inline int
-nouveau_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-
-static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
-{
-}
-
-static inline int
-nouveau_debugfs_channel_init(struct nouveau_channel *chan)
-{
- return 0;
-}
-
-static inline void
-nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
-{
-}
-#endif
-
-/* nouveau_dma.c */
-extern void nouveau_dma_init(struct nouveau_channel *);
-extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-
/* nouveau_acpi.c */
#define ROM_BIOS_PAGE 4096
#if defined(CONFIG_ACPI)
@@ -785,72 +483,8 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
/* nouveau_hdmi.c */
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
-/* nv04_graph.c */
-extern int nv04_graph_create(struct drm_device *);
-extern int nv04_graph_object_new(struct nouveau_channel *, int, u32, u16);
-extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data);
-extern struct nouveau_bitfield nv04_graph_nsource[];
-
-/* nv10_graph.c */
-extern int nv10_graph_create(struct drm_device *);
-extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
-extern struct nouveau_bitfield nv10_graph_intr[];
-extern struct nouveau_bitfield nv10_graph_nstatus[];
-
-/* nv20_graph.c */
-extern int nv20_graph_create(struct drm_device *);
-
-/* nv40_graph.c */
-extern int nv40_graph_create(struct drm_device *);
-extern void nv40_grctx_init(struct drm_device *, u32 *size);
-extern void nv40_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
-
-/* nv50_graph.c */
-extern int nv50_graph_create(struct drm_device *);
-extern struct nouveau_enum nv50_data_error_names[];
-extern int nv50_graph_isr_chid(struct drm_device *dev, u64 inst);
-extern int nv50_grctx_init(struct drm_device *, u32 *, u32, u32 *, u32 *);
-extern void nv50_grctx_fill(struct drm_device *, struct nouveau_gpuobj *);
-
-/* nvc0_graph.c */
-extern int nvc0_graph_create(struct drm_device *);
-extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
-
-/* nve0_graph.c */
-extern int nve0_graph_create(struct drm_device *);
-
-/* nv84_crypt.c */
-extern int nv84_crypt_create(struct drm_device *);
-
-/* nv98_crypt.c */
-extern int nv98_crypt_create(struct drm_device *dev);
-
-/* nva3_copy.c */
-extern int nva3_copy_create(struct drm_device *dev);
-
-/* nvc0_copy.c */
-extern int nvc0_copy_create(struct drm_device *dev, int engine);
-
-/* nv31_mpeg.c */
-extern int nv31_mpeg_create(struct drm_device *dev);
-
-/* nv50_mpeg.c */
-extern int nv50_mpeg_create(struct drm_device *dev);
-
-/* nv84_bsp.c */
-/* nv98_bsp.c */
-extern int nv84_bsp_create(struct drm_device *dev);
-
-/* nv84_vp.c */
-/* nv98_vp.c */
-extern int nv84_vp_create(struct drm_device *dev);
-
-/* nv98_ppp.c */
-extern int nv98_ppp_create(struct drm_device *dev);
-
extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
/* nvd0_display.c */
extern int nvd0_display_create(struct drm_device *);
@@ -895,18 +529,6 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
#endif /* def __BIG_ENDIAN else */
#endif /* !ioread32_native */
-/* channel control reg access */
-static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
-{
- return ioread32_native(chan->user + reg);
-}
-
-static inline void nvchan_wr32(struct nouveau_channel *chan,
- unsigned reg, u32 val)
-{
- iowrite32_native(val, chan->user + reg);
-}
-
/* register access */
#define nv_rd08 _nv_rd08
#define nv_wr08 _nv_wr08
@@ -1023,13 +645,6 @@ nv_match_device(struct drm_device *dev, unsigned device,
dev->pdev->subsystem_device == sub_device;
}
-static inline void *
-nv_engine(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- return (void *)dev_priv->eng[engine];
-}
-
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f3f0b4c362cb..8b8bc8314d92 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -43,19 +43,31 @@
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
-#include "nouveau_drv.h"
-#include <nouveau_drm.h>
-#include "nouveau_crtc.h"
+
+#include "nouveau_drm.h"
+#include "nouveau_gem.h"
+#include "nouveau_bo.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
+
+#include "nouveau_crtc.h"
+
+#include <core/client.h>
+#include <core/device.h>
+
+#include <subdev/fb.h>
+
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
+static int nouveau_nofbaccel = 0;
+module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
- struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
+ struct nouveau_device *device = nv_device(drm->device);
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -63,15 +75,15 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&dev_priv->channel->mutex)) {
- if (dev_priv->card_type < NV_50)
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->card_type < NV_50)
ret = nv04_fbcon_fillrect(info, rect);
else
- if (dev_priv->card_type < NV_C0)
+ if (device->card_type < NV_C0)
ret = nv50_fbcon_fillrect(info, rect);
else
ret = nvc0_fbcon_fillrect(info, rect);
- mutex_unlock(&dev_priv->channel->mutex);
+ mutex_unlock(&drm->client.mutex);
}
if (ret == 0)
@@ -85,9 +97,9 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
- struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
+ struct nouveau_device *device = nv_device(drm->device);
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -95,15 +107,15 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&dev_priv->channel->mutex)) {
- if (dev_priv->card_type < NV_50)
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->card_type < NV_50)
ret = nv04_fbcon_copyarea(info, image);
else
- if (dev_priv->card_type < NV_C0)
+ if (device->card_type < NV_C0)
ret = nv50_fbcon_copyarea(info, image);
else
ret = nvc0_fbcon_copyarea(info, image);
- mutex_unlock(&dev_priv->channel->mutex);
+ mutex_unlock(&drm->client.mutex);
}
if (ret == 0)
@@ -117,9 +129,9 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
+ struct nouveau_device *device = nv_device(drm->device);
int ret;
if (info->state != FBINFO_STATE_RUNNING)
@@ -127,15 +139,15 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
ret = -ENODEV;
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
- mutex_trylock(&dev_priv->channel->mutex)) {
- if (dev_priv->card_type < NV_50)
+ mutex_trylock(&drm->client.mutex)) {
+ if (device->card_type < NV_50)
ret = nv04_fbcon_imageblit(info, image);
else
- if (dev_priv->card_type < NV_C0)
+ if (device->card_type < NV_C0)
ret = nv50_fbcon_imageblit(info, image);
else
ret = nvc0_fbcon_imageblit(info, image);
- mutex_unlock(&dev_priv->channel->mutex);
+ mutex_unlock(&drm->client.mutex);
}
if (ret == 0)
@@ -149,10 +161,9 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
static int
nouveau_fbcon_sync(struct fb_info *info)
{
- struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
if (!chan || !chan->accel_done || in_interrupt() ||
@@ -160,11 +171,11 @@ nouveau_fbcon_sync(struct fb_info *info)
info->flags & FBINFO_HWACCEL_DISABLED)
return 0;
- if (!mutex_trylock(&chan->mutex))
+ if (!mutex_trylock(&drm->client.mutex))
return 0;
ret = nouveau_channel_idle(chan);
- mutex_unlock(&chan->mutex);
+ mutex_unlock(&drm->client.mutex);
if (ret) {
nouveau_fbcon_gpu_lockup(info);
return 0;
@@ -224,9 +235,9 @@ static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
}
static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
+nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct fb_info *info = nfbdev->helper.fbdev;
+ struct fb_info *info = fbcon->helper.fbdev;
struct fb_fillrect rect;
/* Clear the entire fbcon. The drm will program every connector
@@ -242,11 +253,12 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
}
static int
-nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
+nouveau_fbcon_create(struct nouveau_fbdev *fbcon,
struct drm_fb_helper_surface_size *sizes)
{
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = fbcon->dev;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_device *device = nv_device(drm->device);
struct fb_info *info;
struct drm_framebuffer *fb;
struct nouveau_framebuffer *nouveau_fb;
@@ -254,7 +266,6 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct pci_dev *pdev = dev->pdev;
- struct device *device = &pdev->dev;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -272,37 +283,38 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, &nvbo);
if (ret) {
- NV_ERROR(dev, "failed to allocate framebuffer\n");
+ NV_ERROR(drm, "failed to allocate framebuffer\n");
goto out;
}
ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
if (ret) {
- NV_ERROR(dev, "failed to pin fb: %d\n", ret);
+ NV_ERROR(drm, "failed to pin fb: %d\n", ret);
nouveau_bo_ref(NULL, &nvbo);
goto out;
}
ret = nouveau_bo_map(nvbo);
if (ret) {
- NV_ERROR(dev, "failed to map fb: %d\n", ret);
+ NV_ERROR(drm, "failed to map fb: %d\n", ret);
nouveau_bo_unpin(nvbo);
nouveau_bo_ref(NULL, &nvbo);
goto out;
}
- chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
- if (chan && dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
+ chan = nouveau_nofbaccel ? NULL : drm->channel;
+ if (chan && device->card_type >= NV_50) {
+ ret = nouveau_bo_vma_add(nvbo, nv_client(chan->cli)->vm,
+ &fbcon->nouveau_fb.vma);
if (ret) {
- NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
+ NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
}
}
mutex_lock(&dev->struct_mutex);
- info = framebuffer_alloc(0, device);
+ info = framebuffer_alloc(0, &pdev->dev);
if (!info) {
ret = -ENOMEM;
goto out_unref;
@@ -314,16 +326,16 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
goto out_unref;
}
- info->par = nfbdev;
+ info->par = fbcon;
- nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);
+ nouveau_framebuffer_init(dev, &fbcon->nouveau_fb, &mode_cmd, nvbo);
- nouveau_fb = &nfbdev->nouveau_fb;
+ nouveau_fb = &fbcon->nouveau_fb;
fb = &nouveau_fb->base;
/* setup helper */
- nfbdev->helper.fb = fb;
- nfbdev->helper.fbdev = info;
+ fbcon->helper.fb = fb;
+ fbcon->helper.fbdev = info;
strcpy(info->fix.id, "nouveaufb");
if (nouveau_nofbaccel)
@@ -342,25 +354,18 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->screen_size = size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
- drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
-
- /* Set aperture base/size for vesafb takeover */
- info->apertures = dev_priv->apertures;
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_unref;
- }
+ drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
mutex_unlock(&dev->struct_mutex);
- if (dev_priv->channel && !nouveau_nofbaccel) {
+ if (chan) {
ret = -ENODEV;
- if (dev_priv->card_type < NV_50)
+ if (device->card_type < NV_50)
ret = nv04_fbcon_accel_init(info);
else
- if (dev_priv->card_type < NV_C0)
+ if (device->card_type < NV_C0)
ret = nv50_fbcon_accel_init(info);
else
ret = nvc0_fbcon_accel_init(info);
@@ -369,13 +374,12 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->fbops = &nouveau_fbcon_ops;
}
- nouveau_fbcon_zfill(dev, nfbdev);
+ nouveau_fbcon_zfill(dev, fbcon);
/* To allow resizeing without swapping buffers */
- NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
- nouveau_fb->base.width,
- nouveau_fb->base.height,
- nvbo->bo.offset, nvbo);
+ NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+ nouveau_fb->base.width, nouveau_fb->base.height,
+ nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
@@ -390,12 +394,12 @@ static int
nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
- struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
+ struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
int new_fb = 0;
int ret;
if (!helper->fb) {
- ret = nouveau_fbcon_create(nfbdev, sizes);
+ ret = nouveau_fbcon_create(fbcon, sizes);
if (ret)
return ret;
new_fb = 1;
@@ -406,18 +410,18 @@ nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ drm_fb_helper_hotplug_event(&drm->fbcon->helper);
}
static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
+nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
+ struct nouveau_framebuffer *nouveau_fb = &fbcon->nouveau_fb;
struct fb_info *info;
- if (nfbdev->helper.fbdev) {
- info = nfbdev->helper.fbdev;
+ if (fbcon->helper.fbdev) {
+ info = fbcon->helper.fbdev;
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
@@ -430,17 +434,17 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
}
- drm_fb_helper_fini(&nfbdev->helper);
+ drm_fb_helper_fini(&fbcon->helper);
drm_framebuffer_cleanup(&nouveau_fb->base);
return 0;
}
void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
- struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
+ struct nouveau_fbdev *fbcon = info->par;
+ struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
info->flags |= FBINFO_HWACCEL_DISABLED;
}
@@ -451,74 +455,81 @@ static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
};
-int nouveau_fbcon_init(struct drm_device *dev)
+int
+nouveau_fbcon_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fbdev *nfbdev;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fbdev *fbcon;
int preferred_bpp;
int ret;
- nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
- if (!nfbdev)
+ if (!dev->mode_config.num_crtc)
+ return 0;
+
+ fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
+ if (!fbcon)
return -ENOMEM;
- nfbdev->dev = dev;
- dev_priv->nfbdev = nfbdev;
- nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
+ fbcon->dev = dev;
+ drm->fbcon = fbcon;
+ fbcon->helper.funcs = &nouveau_fbcon_helper_funcs;
- ret = drm_fb_helper_init(dev, &nfbdev->helper,
+ ret = drm_fb_helper_init(dev, &fbcon->helper,
dev->mode_config.num_crtc, 4);
if (ret) {
- kfree(nfbdev);
+ kfree(fbcon);
return ret;
}
- drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
+ drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (nvfb_vram_size(dev) <= 32 * 1024 * 1024)
+ if (pfb->ram.size <= 32 * 1024 * 1024)
preferred_bpp = 8;
- else if (nvfb_vram_size(dev) <= 64 * 1024 * 1024)
+ else
+ if (pfb->ram.size <= 64 * 1024 * 1024)
preferred_bpp = 16;
else
preferred_bpp = 32;
- drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
+ drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
return 0;
}
-void nouveau_fbcon_fini(struct drm_device *dev)
+void
+nouveau_fbcon_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
- if (!dev_priv->nfbdev)
+ if (!drm->fbcon)
return;
- nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
- kfree(dev_priv->nfbdev);
- dev_priv->nfbdev = NULL;
+ nouveau_fbcon_destroy(dev, drm->fbcon);
+ kfree(drm->fbcon);
+ drm->fbcon = NULL;
}
void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
- dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
- dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
+ drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
void nouveau_fbcon_restore_accel(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
}
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
console_lock();
if (state == 0)
nouveau_fbcon_save_disable_accel(dev);
- fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+ fb_set_suspend(drm->fbcon->helper.fbdev, state);
if (state == 1)
nouveau_fbcon_restore_accel(dev);
console_unlock();
@@ -526,6 +537,6 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
void nouveau_fbcon_zfill_all(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ nouveau_fbcon_zfill(dev, drm->fbcon);
}
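The fillrect/copyarea/imageblit hooks above all switch from the old per-channel mutex to drm->client.mutex; a condensed sketch (illustrative only, not part of the patch) of the shared pattern follows, with the hypothetical accel callback standing in for the per-generation nv04/nv50/nvc0 implementation.

/* Sketch only: the common locking pattern of the accelerated fbcon hooks. */
static int
example_fbcon_accel(struct fb_info *info, int (*accel)(struct fb_info *))
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_newpriv(fbcon->dev);
	int ret = -ENODEV;

	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&drm->client.mutex)) {
		ret = accel(info);		/* nv04/nv50/nvc0 backend */
		mutex_unlock(&drm->client.mutex);
	}
	return ret;				/* non-zero: fall back to software rendering */
}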
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index b73c29f87fc3..e6404e39eaf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -30,6 +30,7 @@
#include "drm_fb_helper.h"
#include "nouveau_fb.h"
+
struct nouveau_fbdev {
struct drm_fb_helper helper;
struct nouveau_framebuffer nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index a1835d710f73..5b5471ba6eda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -30,11 +30,9 @@
#include <linux/ktime.h>
#include <linux/hrtimer.h>
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
+#include "nouveau_fence.h"
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
@@ -59,12 +57,10 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
spin_lock_init(&fctx->lock);
}
-void
+static void
nouveau_fence_update(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence *fence, *fnext;
@@ -85,9 +81,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_fence_chan *fctx = chan->fence;
int ret;
@@ -150,20 +144,17 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
int
nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fence_priv *priv = chan->drm->fence;
struct nouveau_channel *prev;
int ret = 0;
- prev = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
+ prev = fence ? fence->channel : NULL;
if (prev) {
if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
ret = priv->sync(fence, prev, chan);
if (unlikely(ret))
ret = nouveau_fence_wait(fence, true, false);
}
- nouveau_channel_put_unlocked(&prev);
}
return ret;
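With the channel get/put dance gone, nouveau_fence_sync() now works directly against the fence's originating channel; a minimal sketch (illustrative only) of syncing a buffer's last fence onto another channel, assuming bo.sync_obj still carries a nouveau_fence as in the page-flip path earlier in this diff. The example_* name is hypothetical.

/* Sketch only: make `chan` wait for the last fence attached to a BO. */
static int
example_sync_bo(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = nvbo->bo.sync_obj;

	if (!fence)
		return 0;
	return nouveau_fence_sync(fence, chan);
}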
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 8ee65758f24f..bedafd1c9539 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -1,6 +1,8 @@
#ifndef __NOUVEAU_FENCE_H__
#define __NOUVEAU_FENCE_H__
+struct nouveau_drm;
+
struct nouveau_fence {
struct list_head head;
struct kref kref;
@@ -22,8 +24,6 @@ int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
bool nouveau_fence_done(struct nouveau_fence *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-void nouveau_fence_idle(struct nouveau_channel *);
-void nouveau_fence_update(struct nouveau_channel *);
struct nouveau_fence_chan {
struct list_head pending;
@@ -34,9 +34,9 @@ struct nouveau_fence_chan {
};
struct nouveau_fence_priv {
- void (*dtor)(struct drm_device *);
- bool (*suspend)(struct drm_device *);
- void (*resume)(struct drm_device *);
+ void (*dtor)(struct nouveau_drm *);
+ bool (*suspend)(struct nouveau_drm *);
+ void (*resume)(struct nouveau_drm *);
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
int (*emit)(struct nouveau_fence *);
@@ -45,10 +45,12 @@ struct nouveau_fence_priv {
u32 (*read)(struct nouveau_channel *);
};
+#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
+
void nouveau_fence_context_new(struct nouveau_fence_chan *);
void nouveau_fence_context_del(struct nouveau_fence_chan *);
-int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_create(struct nouveau_drm *);
int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
int nv10_fence_emit(struct nouveau_fence *);
@@ -56,12 +58,12 @@ int nv17_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
struct nouveau_channel *);
u32 nv10_fence_read(struct nouveau_channel *);
void nv10_fence_context_del(struct nouveau_channel *);
-void nv10_fence_destroy(struct drm_device *);
-int nv10_fence_create(struct drm_device *dev);
+void nv10_fence_destroy(struct nouveau_drm *);
+int nv10_fence_create(struct nouveau_drm *);
-int nv50_fence_create(struct drm_device *dev);
-int nv84_fence_create(struct drm_device *dev);
-int nvc0_fence_create(struct drm_device *dev);
+int nv50_fence_create(struct nouveau_drm *);
+int nv84_fence_create(struct nouveau_drm *);
+int nvc0_fence_create(struct nouveau_drm *);
u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
int nouveau_flip_complete(void *chan);
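
The fence backend interface above is now parameterized by struct nouveau_drm instead of drm_device, and drm->fence is reached through the nouveau_fence(drm) cast. A minimal sketch of how a backend constructor might populate that vtable, using only the hook signatures declared above; the nvxx_* names, the wrapper struct and the hook bodies are illustrative assumptions, not code added by this patch:

    #include <linux/slab.h>
    #include "nouveau_drm.h"
    #include "nouveau_fence.h"

    /* hypothetical backend hooks, assumed to be defined elsewhere in the backend */
    void nvxx_fence_destroy(struct nouveau_drm *);
    int  nvxx_fence_context_new(struct nouveau_channel *);
    void nvxx_fence_context_del(struct nouveau_channel *);
    int  nvxx_fence_emit(struct nouveau_fence *);
    int  nvxx_fence_sync(struct nouveau_fence *, struct nouveau_channel *,
                         struct nouveau_channel *);
    u32  nvxx_fence_read(struct nouveau_channel *);

    struct nvxx_fence_priv {                       /* hypothetical wrapper */
            struct nouveau_fence_priv base;        /* first member, so the
                                                    * nouveau_fence(drm) cast is valid */
            /* ... backend-private state ... */
    };

    int
    nvxx_fence_create(struct nouveau_drm *drm)     /* hypothetical constructor */
    {
            struct nvxx_fence_priv *priv;

            priv = kzalloc(sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return -ENOMEM;

            priv->base.dtor        = nvxx_fence_destroy;
            priv->base.context_new = nvxx_fence_context_new;
            priv->base.context_del = nvxx_fence_context_del;
            priv->base.emit        = nvxx_fence_emit;
            priv->base.sync        = nvxx_fence_sync;
            priv->base.read        = nvxx_fence_read;

            drm->fence = &priv->base;              /* later retrieved via nouveau_fence(drm) */
            return 0;
    }
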
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 96a34bce54ce..ba744daeb50e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,16 +23,19 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <linux/dma-buf.h>
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
+#include <linux/dma-buf.h>
#include <nouveau_drm.h>
+
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
-#define nouveau_gem_pushbuf_sync(chan) 0
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
int
nouveau_gem_object_new(struct drm_gem_object *gem)
@@ -67,19 +70,19 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
int ret;
- if (!fpriv->vm)
+ if (!cli->base.vm)
return 0;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
if (ret)
return ret;
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
if (!vma) {
vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (!vma) {
@@ -87,7 +90,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
- ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+ ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
if (ret) {
kfree(vma);
goto out;
@@ -104,19 +107,19 @@ out:
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
int ret;
- if (!fpriv->vm)
+ if (!cli->base.vm)
return;
ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
if (ret)
return;
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
if (vma) {
if (--vma->refcount == 0) {
nouveau_bo_vma_del(nvbo, vma);
@@ -131,7 +134,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
struct nouveau_bo **pnvbo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo;
u32 flags = 0;
int ret;
@@ -155,7 +158,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
*/
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
NOUVEAU_GEM_DOMAIN_GART;
- if (dev_priv->card_type >= NV_50)
+ if (nv_device(drm->device)->card_type >= NV_50)
nvbo->valid_domains &= domain;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
@@ -173,7 +176,7 @@ static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
struct drm_nouveau_gem_info *rep)
{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+ struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct nouveau_vma *vma;
@@ -183,8 +186,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->offset = nvbo->bo.offset;
- if (fpriv->vm) {
- vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+ if (cli->base.vm) {
+ vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
if (!vma)
return -EINVAL;
@@ -202,15 +205,16 @@ int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
int ret = 0;
- dev_priv->ttm.bdev.dev_mapping = dev->dev_mapping;
+ drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
- if (!nvfb_flags_valid(dev, req->info.tile_flags)) {
- NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
+ if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
+ NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
return -EINVAL;
}
@@ -312,16 +316,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_device *dev = chan->drm->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
uint32_t sequence;
int trycnt = 0;
int ret, i;
- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+ sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
retry:
if (++trycnt > 100000) {
- NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+ NV_ERROR(drm, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
@@ -332,14 +336,14 @@ retry:
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
- NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+ NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
return -ENOENT;
}
nvbo = gem->driver_private;
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
- NV_ERROR(dev, "multiple instances of buffer %d on "
+ NV_ERROR(drm, "multiple instances of buffer %d on "
"validation list\n", b->handle);
drm_gem_object_unreference_unlocked(gem);
validate_fini(op, NULL);
@@ -354,7 +358,7 @@ retry:
drm_gem_object_unreference_unlocked(gem);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "fail reserve\n");
+ NV_ERROR(drm, "fail reserve\n");
return ret;
}
goto retry;
@@ -373,7 +377,7 @@ retry:
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
- NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+ NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
@@ -407,10 +411,9 @@ static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_drm *drm = chan->drm;
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
- struct drm_device *dev = chan->dev;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -419,7 +422,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(dev, "fail pre-validate sync\n");
+ NV_ERROR(drm, "fail pre-validate sync\n");
return ret;
}
@@ -427,24 +430,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
- NV_ERROR(dev, "fail set_domain\n");
+ NV_ERROR(drm, "fail set_domain\n");
return ret;
}
ret = nouveau_bo_validate(nvbo, true, false, false);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "fail ttm_validate\n");
+ NV_ERROR(drm, "fail ttm_validate\n");
return ret;
}
ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
- NV_ERROR(dev, "fail post-validate sync\n");
+ NV_ERROR(drm, "fail post-validate sync\n");
return ret;
}
- if (dev_priv->card_type < NV_50) {
+ if (nv_device(drm->device)->card_type < NV_50) {
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -476,7 +479,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
- struct drm_device *dev = chan->dev;
+ struct nouveau_drm *drm = chan->drm;
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -489,14 +492,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "validate_init\n");
+ NV_ERROR(drm, "validate_init\n");
return ret;
}
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "validate vram_list\n");
+ NV_ERROR(drm, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -505,7 +508,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "validate gart_list\n");
+ NV_ERROR(drm, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -514,7 +517,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "validate both_list\n");
+ NV_ERROR(drm, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -547,6 +550,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
@@ -562,7 +566,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
- NV_ERROR(dev, "reloc bo index invalid\n");
+ NV_ERROR(drm, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -572,7 +576,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
- NV_ERROR(dev, "reloc container bo index invalid\n");
+ NV_ERROR(drm, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
@@ -580,7 +584,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
- NV_ERROR(dev, "reloc outside of bo\n");
+ NV_ERROR(drm, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
@@ -589,7 +593,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
- NV_ERROR(dev, "failed kmap for reloc\n");
+ NV_ERROR(drm, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
@@ -614,7 +618,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.bdev->fence_lock);
if (ret) {
- NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+ NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
break;
}
@@ -629,62 +633,67 @@ int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+ struct nouveau_abi16_chan *temp;
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
struct drm_nouveau_gem_pushbuf_bo *bo;
- struct nouveau_channel *chan;
+ struct nouveau_channel *chan = NULL;
struct validate_op op;
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- chan = nouveau_channel_get(file_priv, req->channel);
- if (IS_ERR(chan))
- return PTR_ERR(chan);
+ if (unlikely(!abi16))
+ return -ENOMEM;
- req->vram_available = dev_priv->fb_aper_free;
- req->gart_available = dev_priv->gart_info.aper_free;
+ list_for_each_entry(temp, &abi16->channels, head) {
+ if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+ chan = temp->chan;
+ break;
+ }
+ }
+
+ if (!chan)
+ return nouveau_abi16_put(abi16, -ENOENT);
+
+ req->vram_available = drm->gem.vram_available;
+ req->gart_available = drm->gem.gart_available;
if (unlikely(req->nr_push == 0))
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
- NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+ NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
- nouveau_channel_put(&chan);
- return -EINVAL;
+ return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
- NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+ NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
- nouveau_channel_put(&chan);
- return -EINVAL;
+ return nouveau_abi16_put(abi16, -EINVAL);
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
- NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+ NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
- nouveau_channel_put(&chan);
- return -EINVAL;
+ return nouveau_abi16_put(abi16, -EINVAL);
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
- if (IS_ERR(push)) {
- nouveau_channel_put(&chan);
- return PTR_ERR(push);
- }
+ if (IS_ERR(push))
+ return nouveau_abi16_put(abi16, PTR_ERR(push));
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
- nouveau_channel_put(&chan);
- return PTR_ERR(bo);
+ return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
/* Ensure all push buffers are on validate list */
for (i = 0; i < req->nr_push; i++) {
if (push[i].bo_index >= req->nr_buffers) {
- NV_ERROR(dev, "push %d buffer not in list\n", i);
+ NV_ERROR(drm, "push %d buffer not in list\n", i);
ret = -EINVAL;
goto out_prevalid;
}
@@ -695,7 +704,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
req->nr_buffers, &op, &do_reloc);
if (ret) {
if (ret != -ERESTARTSYS)
- NV_ERROR(dev, "validate: %d\n", ret);
+ NV_ERROR(drm, "validate: %d\n", ret);
goto out_prevalid;
}
@@ -703,7 +712,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
if (ret) {
- NV_ERROR(dev, "reloc apply: %d\n", ret);
+ NV_ERROR(drm, "reloc apply: %d\n", ret);
goto out;
}
}
@@ -711,7 +720,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
if (ret) {
- NV_INFO(dev, "nv50cal_space: %d\n", ret);
+ NV_ERROR(drm, "nv50cal_space: %d\n", ret);
goto out;
}
@@ -723,10 +732,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
push[i].length);
}
} else
- if (dev_priv->chipset >= 0x25) {
+ if (nv_device(drm->device)->chipset >= 0x25) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
- NV_ERROR(dev, "cal_space: %d\n", ret);
+ NV_ERROR(drm, "cal_space: %d\n", ret);
goto out;
}
@@ -740,7 +749,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
- NV_ERROR(dev, "jmp_space: %d\n", ret);
+ NV_ERROR(drm, "jmp_space: %d\n", ret);
goto out;
}
@@ -749,7 +758,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo[push[i].bo_index].user_priv;
uint32_t cmd;
- cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
@@ -778,7 +787,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
ret = nouveau_fence_new(chan, &fence);
if (ret) {
- NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+ NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
@@ -796,17 +805,16 @@ out_next:
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
- if (dev_priv->chipset >= 0x25) {
+ if (nv_device(drm->device)->chipset >= 0x25) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
- (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+ (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
- nouveau_channel_put(&chan);
- return ret;
+ return nouveau_abi16_put(abi16, ret);
}
static inline uint32_t
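
In the nouveau_gem_ioctl_pushbuf() hunk above, the channel is no longer obtained through nouveau_channel_get(); the ioctl walks the caller's ABI16 channel list and matches on the object handle (NVDRM_CHAN | req->channel), and every exit path funnels through nouveau_abi16_put(). The same lookup, factored into a helper purely for illustration (the helper name is hypothetical and is not introduced by this patch):

    static struct nouveau_channel *
    abi16_chan_lookup(struct nouveau_abi16 *abi16, u32 id)  /* hypothetical helper */
    {
            struct nouveau_abi16_chan *temp;

            /* same match the ioctl open-codes above */
            list_for_each_entry(temp, &abi16->channels, head) {
                    if (temp->chan->handle == (NVDRM_CHAN | id))
                            return temp->chan;
            }
            return NULL;
    }
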
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c b/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
deleted file mode 100644
index a774b7ad0f21..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_gpuobj.c
+++ /dev/null
@@ -1,518 +0,0 @@
-/*
- * Copyright (C) 2006 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-/*
- * Authors:
- * Ben Skeggs <darktama@iinet.net.au>
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <nouveau_drm.h>
-#include <engine/fifo.h>
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-struct nouveau_gpuobj_method {
- struct list_head head;
- u32 mthd;
- int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
-};
-
-struct nouveau_gpuobj_class {
- struct list_head head;
- struct list_head methods;
- u32 id;
- u32 engine;
-};
-
-int
-nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj_class *oc;
-
- oc = kzalloc(sizeof(*oc), GFP_KERNEL);
- if (!oc)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&oc->methods);
- oc->id = class;
- oc->engine = engine;
- list_add(&oc->head, &dev_priv->classes);
- return 0;
-}
-
-int
-nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
- int (*exec)(struct nouveau_channel *, u32, u32, u32))
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj_method *om;
- struct nouveau_gpuobj_class *oc;
-
- list_for_each_entry(oc, &dev_priv->classes, head) {
- if (oc->id == class)
- goto found;
- }
-
- return -EINVAL;
-
-found:
- om = kzalloc(sizeof(*om), GFP_KERNEL);
- if (!om)
- return -ENOMEM;
-
- om->mthd = mthd;
- om->exec = exec;
- list_add(&om->head, &oc->methods);
- return 0;
-}
-
-int
-nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj_method *om;
- struct nouveau_gpuobj_class *oc;
-
- list_for_each_entry(oc, &dev_priv->classes, head) {
- if (oc->id != class)
- continue;
-
- list_for_each_entry(om, &oc->methods, head) {
- if (om->mthd == mthd)
- return om->exec(chan, class, mthd, data);
- }
- }
-
- return -ENOENT;
-}
-
-int
-nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nouveau_channel *chan = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (chid >= 0 && chid < pfifo->channels)
- chan = dev_priv->channels.ptr[chid];
- if (chan)
- ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return ret;
-}
-
-void
-nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
- u64 base, u64 size, int target, int access,
- u32 type, u32 comp)
-{
- struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
- u32 flags0;
-
- flags0 = (comp << 29) | (type << 22) | class;
- flags0 |= 0x00100000;
-
- switch (access) {
- case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
- case NV_MEM_ACCESS_RW:
- case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
- default:
- break;
- }
-
- switch (target) {
- case NV_MEM_TARGET_VRAM:
- flags0 |= 0x00010000;
- break;
- case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
- break;
- case NV_MEM_TARGET_GART:
- base += dev_priv->gart_info.aper_base;
- default:
- flags0 &= ~0x00100000;
- break;
- }
-
- /* convert to base + limit */
- size = (base + size) - 1;
-
- nv_wo32(obj, offset + 0x00, flags0);
- nv_wo32(obj, offset + 0x04, lower_32_bits(size));
- nv_wo32(obj, offset + 0x08, lower_32_bits(base));
- nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
- upper_32_bits(base));
- nv_wo32(obj, offset + 0x10, 0x00000000);
- nv_wo32(obj, offset + 0x14, 0x00000000);
-
- nvimem_flush(obj->dev);
-}
-
-int
-nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
- int target, int access, u32 type, u32 comp,
- struct nouveau_gpuobj **pobj)
-{
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
- if (ret)
- return ret;
-
- nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
- access, type, comp);
- return 0;
-}
-
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
- u64 size, int access, int target,
- struct nouveau_gpuobj **pobj)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj;
- u32 flags0, flags2;
- int ret;
-
- if (dev_priv->card_type >= NV_50) {
- u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
- u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
-
- return nv50_gpuobj_dma_new(chan, class, base, size,
- target, access, type, comp, pobj);
- }
-
- if (target == NV_MEM_TARGET_GART) {
- struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
-
- if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
- if (base == 0) {
- nouveau_gpuobj_ref(gart, pobj);
- return 0;
- }
-
- base = nouveau_sgdma_get_physical(dev, base);
- target = NV_MEM_TARGET_PCI;
- } else {
- base += dev_priv->gart_info.aper_base;
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
- target = NV_MEM_TARGET_PCI_NOSNOOP;
- else
- target = NV_MEM_TARGET_PCI;
- }
- }
-
- flags0 = class;
- flags0 |= 0x00003000; /* PT present, PT linear */
- flags2 = 0;
-
- switch (target) {
- case NV_MEM_TARGET_PCI:
- flags0 |= 0x00020000;
- break;
- case NV_MEM_TARGET_PCI_NOSNOOP:
- flags0 |= 0x00030000;
- break;
- default:
- break;
- }
-
- switch (access) {
- case NV_MEM_ACCESS_RO:
- flags0 |= 0x00004000;
- break;
- case NV_MEM_ACCESS_WO:
- flags0 |= 0x00008000;
- default:
- flags2 |= 0x00000002;
- break;
- }
-
- flags0 |= (base & 0x00000fff) << 20;
- flags2 |= (base & 0xfffff000);
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
-
- nv_wo32(obj, 0x00, flags0);
- nv_wo32(obj, 0x04, size - 1);
- nv_wo32(obj, 0x08, flags2);
- nv_wo32(obj, 0x0c, flags2);
-
- obj->engine = NVOBJ_ENGINE_SW;
- obj->class = class;
- *pobj = obj;
- return 0;
-}
-
-int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj_class *oc;
- int ret;
-
- NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
-
- list_for_each_entry(oc, &dev_priv->classes, head) {
- struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];
-
- if (oc->id != class)
- continue;
-
- if (!chan->engctx[oc->engine]) {
- ret = eng->context_new(chan, oc->engine);
- if (ret)
- return ret;
- }
-
- return eng->object_new(chan, oc->engine, handle, class);
- }
-
- return -EINVAL;
-}
-
-static int
-nv04_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv50_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, 0, &chan->engptr);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nv84_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
-{
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &chan->vm_pd);
- if (ret)
- return ret;
-
- nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
-
- nv_wo32(chan->ramin, 0x0200, lower_32_bits(chan->vm_pd->addr));
- nv_wo32(chan->ramin, 0x0204, upper_32_bits(chan->vm_pd->addr));
- nv_wo32(chan->ramin, 0x0208, 0xffffffff);
- nv_wo32(chan->ramin, 0x020c, 0x000000ff);
-
- return 0;
-}
-
-int
-nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
- uint32_t vram_h, uint32_t tt_h)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
- struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
- struct nouveau_gpuobj *vram = NULL, *tt = NULL;
- int ret;
-
- NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
- if (dev_priv->card_type >= NV_C0)
- return nvc0_gpuobj_channel_init(chan, vm);
-
- /* Allocate a chunk of memory for per-channel object storage */
- if (dev_priv->chipset >= 0x84)
- ret = nv84_gpuobj_channel_init_pramin(chan);
- else
- if (dev_priv->chipset == 0x50)
- ret = nv50_gpuobj_channel_init_pramin(chan);
- else
- ret = nv04_gpuobj_channel_init_pramin(chan);
- if (ret) {
- NV_ERROR(dev, "init pramin\n");
- return ret;
- }
-
- /* NV50 VM
- * - Allocate per-channel page-directory
- * - Link with shared channel VM
- */
- if (vm)
- nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
-
- /* RAMHT */
- if (dev_priv->card_type < NV_50) {
- nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
- } else {
- struct nouveau_gpuobj *ramht = NULL;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &ramht);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret)
- return ret;
- }
-
- /* VRAM ctxdma */
- if (dev_priv->card_type >= NV_50) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, (1ULL << 40), NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VM, &vram);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
- return ret;
- }
- } else {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->fb_available_size,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &vram);
- if (ret) {
- NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
- return ret;
- }
- }
-
- ret = nouveau_ramht_insert(chan, vram_h, vram);
- nouveau_gpuobj_ref(NULL, &vram);
- if (ret) {
- NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
- return ret;
- }
-
- /* TT memory ctxdma */
- if (dev_priv->card_type >= NV_50) {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, (1ULL << 40), NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VM, &tt);
- } else {
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->gart_info.aper_size,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_GART, &tt);
- }
-
- if (ret) {
- NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_ramht_insert(chan, tt_h, tt);
- nouveau_gpuobj_ref(NULL, &tt);
- if (ret) {
- NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void
-nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
-{
- NV_DEBUG(chan->dev, "ch%d\n", chan->id);
-
- nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
- nouveau_gpuobj_ref(NULL, &chan->engptr);
-
- nouveau_gpuobj_ref(NULL, &chan->ramin);
-}
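
nouveau_gpuobj.c is deleted outright: per-channel gpuobj/RAMHT setup and DMA-object construction leave the DRM layer, with equivalent functionality expected from the new core modules (not shown in this part of the diff). For reference, the descriptor-word composition from the deleted nv50_gpuobj_dma_init() above, restated as a standalone helper; the helper name is illustrative, and the GART case, which additionally rebases the address on the aperture base, is omitted:

    static u32
    nv50_dmaobj_flags0(int class, int target, int access, u32 type, u32 comp)
    {
            /* 0x00100000 is set up front and cleared again in the
             * default (VM/GART) target case, as in the deleted code */
            u32 flags0 = (comp << 29) | (type << 22) | class | 0x00100000;

            switch (access) {
            case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
            case NV_MEM_ACCESS_RW:
            case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
            default: break;
            }

            switch (target) {
            case NV_MEM_TARGET_VRAM:        flags0 |= 0x00010000; break;
            case NV_MEM_TARGET_PCI:         flags0 |= 0x00020000; break;
            case NV_MEM_TARGET_PCI_NOSNOOP: flags0 |= 0x00030000; break;
            default:                        flags0 &= ~0x00100000; break;
            }
            return flags0;
    }
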
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1a75a96cef26..25e2e63cc53a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,7 +36,6 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include <core/ramht.h>
-#include "nouveau_util.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 73176bcd1b64..9c35d14fe9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -37,146 +37,6 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include <core/mm.h>
-#include <engine/fifo.h>
-#include "nouveau_fence.h"
-
-/*
- * Cleanup everything
- */
-void
-nouveau_mem_vram_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- ttm_bo_device_release(&dev_priv->ttm.bdev);
-
- nouveau_ttm_global_release(dev_priv);
-
- if (dev_priv->fb_mtrr >= 0) {
- drm_mtrr_del(dev_priv->fb_mtrr,
- pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = -1;
- }
-}
-
-void
-nouveau_mem_gart_fini(struct drm_device *dev)
-{
- nouveau_sgdma_takedown(dev);
-}
-
-int
-nouveau_mem_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- int ret, dma_bits;
-
- dma_bits = 32;
- if (dev_priv->card_type >= NV_50) {
- if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
- dma_bits = 40;
- } else
- if (0 && pci_is_pcie(dev->pdev) &&
- dev_priv->chipset > 0x40 &&
- dev_priv->chipset != 0x45) {
- if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
- dma_bits = 39;
- }
-
- ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret)
- return ret;
- ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret) {
- /* Reset to default value. */
- pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
- }
-
-
- ret = nouveau_ttm_global_init(dev_priv);
- if (ret)
- return ret;
-
- ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
- dev_priv->ttm.bo_global_ref.ref.object,
- &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
- dma_bits <= 32 ? true : false);
- if (ret) {
- NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
- return ret;
- }
-
- dev_priv->fb_available_size = nvfb_vram_size(dev);
- dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
- dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
- dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
- dev_priv->fb_available_size -= nvimem_reserved(dev);
- dev_priv->fb_aper_free = dev_priv->fb_available_size;
-
- /* mappable vram */
- ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
- dev_priv->fb_available_size >> PAGE_SHIFT);
- if (ret) {
- NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
- return ret;
- }
-
- if (dev_priv->card_type < NV_50) {
- ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
- 0, 0, NULL, &dev_priv->vga_ram);
- if (ret == 0)
- ret = nouveau_bo_pin(dev_priv->vga_ram,
- TTM_PL_FLAG_VRAM);
-
- if (ret) {
- NV_WARN(dev, "failed to reserve VGA memory\n");
- nouveau_bo_ref(NULL, &dev_priv->vga_ram);
- }
- }
-
- dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1),
- DRM_MTRR_WC);
- return 0;
-}
-
-int
-nouveau_mem_gart_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- int ret;
-
- if (!nvdrm_gart_init(dev, &dev_priv->gart_info.aper_base,
- &dev_priv->gart_info.aper_size))
- dev_priv->gart_info.type = NOUVEAU_GART_AGP;
-
- if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
- ret = nouveau_sgdma_init(dev);
- if (ret) {
- NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
- return ret;
- }
- }
-
- NV_INFO(dev, "%d MiB GART (aperture)\n",
- (int)(dev_priv->gart_info.aper_size >> 20));
- dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
-
- ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
- dev_priv->gart_info.aper_size >> PAGE_SHIFT);
- if (ret) {
- NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
deleted file mode 100644
index 2cc4779b4299..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2007 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-
-int
-nouveau_notifier_init_channel(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_bo *ntfy = NULL;
- uint32_t flags, ttmpl;
- int ret;
-
- if (nouveau_vram_notify) {
- flags = NOUVEAU_GEM_DOMAIN_VRAM;
- ttmpl = TTM_PL_FLAG_VRAM;
- } else {
- flags = NOUVEAU_GEM_DOMAIN_GART;
- ttmpl = TTM_PL_FLAG_TT;
- }
-
- ret = nouveau_gem_new(dev, PAGE_SIZE, 0, flags, 0, 0, &ntfy);
- if (ret)
- return ret;
-
- ret = nouveau_bo_pin(ntfy, ttmpl);
- if (ret)
- goto out_err;
-
- ret = nouveau_bo_map(ntfy);
- if (ret)
- goto out_err;
-
- if (dev_priv->card_type >= NV_50) {
- ret = nouveau_bo_vma_add(ntfy, chan->vm, &chan->notifier_vma);
- if (ret)
- goto out_err;
- }
-
- ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
- if (ret)
- goto out_err;
-
- chan->notifier_bo = ntfy;
-out_err:
- if (ret) {
- nouveau_bo_vma_del(ntfy, &chan->notifier_vma);
- drm_gem_object_unreference_unlocked(ntfy->gem);
- }
-
- return ret;
-}
-
-void
-nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- if (!chan->notifier_bo)
- return;
-
- nouveau_bo_vma_del(chan->notifier_bo, &chan->notifier_vma);
- nouveau_bo_unmap(chan->notifier_bo);
- mutex_lock(&dev->struct_mutex);
- nouveau_bo_unpin(chan->notifier_bo);
- mutex_unlock(&dev->struct_mutex);
- drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
- drm_mm_takedown(&chan->notifier_heap);
-}
-
-int
-nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
- int size, uint32_t start, uint32_t end,
- uint32_t *b_offset)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *nobj = NULL;
- struct drm_mm_node *mem;
- uint64_t offset;
- int target, ret;
-
- mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
- start, end, 0);
- if (mem)
- mem = drm_mm_get_block_range(mem, size, 0, start, end);
- if (!mem) {
- NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
- return -ENOMEM;
- }
-
- if (dev_priv->card_type < NV_50) {
- if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
- target = NV_MEM_TARGET_VRAM;
- else
- target = NV_MEM_TARGET_GART;
- offset = chan->notifier_bo->bo.offset;
- } else {
- target = NV_MEM_TARGET_VM;
- offset = chan->notifier_vma.offset;
- }
- offset += mem->start;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
- mem->size, NV_MEM_ACCESS_RW, target,
- &nobj);
- if (ret) {
- drm_mm_put_block(mem);
- NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_ramht_insert(chan, handle, nobj);
- nouveau_gpuobj_ref(NULL, &nobj);
- if (ret) {
- drm_mm_put_block(mem);
- NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
- return ret;
- }
-
- *b_offset = mem->start;
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index cce47fa7cb52..de0b81fbdcc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -27,7 +27,6 @@
#include "nouveau_drv.h"
#include <nouveau_drm.h>
-#include "nouveau_dma.h"
#include <linux/dma-buf.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_revcompat.c b/drivers/gpu/drm/nouveau/nouveau_revcompat.c
new file mode 100644
index 000000000000..d5c3390503db
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_revcompat.c
@@ -0,0 +1,22 @@
+#include "nouveau_revcompat.h"
+#include "nouveau_drv.h"
+#include "nv50_display.h"
+
+struct nouveau_drm *
+nouveau_newpriv(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->newpriv;
+}
+
+struct nouveau_bo *
+nv50sema(struct drm_device *dev, int crtc)
+{
+ return nv50_display(dev)->crtc[crtc].sem.bo;
+}
+
+struct nouveau_bo *
+nvd0sema(struct drm_device *dev, int crtc)
+{
+ return nvd0_display_crtc_sema(dev, crtc);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_revcompat.h b/drivers/gpu/drm/nouveau/nouveau_revcompat.h
new file mode 100644
index 000000000000..41cf61f1415f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_revcompat.h
@@ -0,0 +1,12 @@
+#ifndef __NOUVEAU_REVCOMPAT_H__
+#define __NOUVEAU_REVCOMPAT_H__
+
+#include "drmP.h"
+
+struct nouveau_drm *
+nouveau_newpriv(struct drm_device *);
+
+struct nouveau_bo *nv50sema(struct drm_device *dev, int crtc);
+struct nouveau_bo *nvd0sema(struct drm_device *dev, int crtc);
+
+#endif
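
nouveau_revcompat.{c,h} is a temporary bridge: code that has not yet been ported and still only holds a legacy struct drm_device can reach the new per-device struct nouveau_drm through nouveau_newpriv() (stored as dev_priv->newpriv). Usage follows the pattern in the nouveau_ttm.c hunk later in this diff; a trivial sketch with an illustrative function name:

    static int
    legacy_path_example(struct drm_device *dev)    /* illustrative name only */
    {
            struct nouveau_drm *drm = nouveau_newpriv(dev);

            /* new-world state (drm->ttm, drm->fence, drm->dev, ...) is
             * reachable from here without touching drm_nouveau_private */
            return drm ? 0 : -ENODEV;
    }
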
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 464beda94c58..ca5492ac2da5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,11 +1,10 @@
-#include "drmP.h"
-#include "nouveau_drv.h"
#include <linux/pagemap.h>
#include <linux/slab.h>
-#define NV_CTXDMA_PAGE_SHIFT 12
-#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
-#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
+#include <subdev/fb.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
struct nouveau_sgdma_be {
/* this has to be the first field so populate/unpopulated in
@@ -22,7 +21,6 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
- NV_DEBUG(nvbe->dev, "\n");
ttm_dma_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
@@ -93,16 +91,18 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
- nvbe->dev = dev;
- nvbe->ttm.ttm.func = dev_priv->gart_info.func;
+ nvbe->dev = drm->dev;
+ if (nv_device(drm->device)->card_type < NV_50)
+ nvbe->ttm.ttm.func = &nv04_sgdma_backend;
+ else
+ nvbe->ttm.ttm.func = &nv50_sgdma_backend;
if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
kfree(nvbe);
@@ -110,51 +110,3 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
}
return &nvbe->ttm.ttm;
}
-
-int
-nouveau_sgdma_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 aper_size;
-
- if (dev_priv->card_type >= NV_50)
- aper_size = 512 * 1024 * 1024;
- else
- aper_size = 128 * 1024 * 1024;
-
- if (dev_priv->card_type >= NV_50) {
- dev_priv->gart_info.aper_base = 0;
- dev_priv->gart_info.aper_size = aper_size;
- dev_priv->gart_info.type = NOUVEAU_GART_HW;
- dev_priv->gart_info.func = &nv50_sgdma_backend;
- } else {
- dev_priv->gart_info.aper_base = 0;
- dev_priv->gart_info.aper_size = aper_size;
- dev_priv->gart_info.type = NOUVEAU_GART_PDMA;
- dev_priv->gart_info.func = &nv04_sgdma_backend;
- dev_priv->gart_info.sg_ctxdma = nv04vm_refdma(dev);
- }
-
- return 0;
-}
-
-void
-nouveau_sgdma_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
-}
-
-uint32_t
-nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
-
- BUG_ON(dev_priv->card_type >= NV_50);
-
- return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
- (offset & NV_CTXDMA_PAGE_MASK);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
deleted file mode 100644
index 2105a9eef52c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_software.h
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef __NOUVEAU_SOFTWARE_H__
-#define __NOUVEAU_SOFTWARE_H__
-
-#include "nouveau_fence.h"
-
-struct nouveau_software_priv {
- struct nouveau_exec_engine base;
- struct list_head vblank;
- spinlock_t peephole_lock;
-};
-
-struct nouveau_software_chan {
- int (*flip)(void *data);
- void *flip_data;
-
- struct {
- struct list_head list;
- u32 channel;
- u32 ctxdma;
- u32 offset;
- u32 value;
- u32 head;
- } vblank;
-};
-
-static inline void
-nouveau_software_context_new(struct nouveau_channel *chan,
- struct nouveau_software_chan *pch)
-{
- pch->flip = nouveau_flip_complete;
- pch->flip_data = chan;
-}
-
-static inline void
-nouveau_software_create(struct nouveau_software_priv *psw)
-{
- INIT_LIST_HEAD(&psw->vblank);
- spin_lock_init(&psw->peephole_lock);
-}
-
-static inline u16
-nouveau_software_class(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->card_type <= NV_04)
- return 0x006e;
- if (dev_priv->card_type <= NV_40)
- return 0x016e;
- if (dev_priv->card_type <= NV_50)
- return 0x506e;
- if (dev_priv->card_type <= NV_E0)
- return 0x906e;
- return 0x0000;
-}
-
-int nv04_software_create(struct drm_device *);
-int nv50_software_create(struct drm_device *);
-int nvc0_software_create(struct drm_device *);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 5c2836fbf01a..4349b337cfdd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,13 +35,9 @@
#include "nouveau_drv.h"
#include <nouveau_drm.h>
#include "nouveau_fbcon.h"
-#include <core/ramht.h>
#include "nouveau_pm.h"
#include "nv04_display.h"
#include "nv50_display.h"
-#include <engine/fifo.h>
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_stub_init(struct drm_device *dev) { return 0; }
@@ -266,38 +262,6 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static void
-nouveau_card_channel_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->channel) {
- nouveau_channel_put_unlocked(&dev_priv->channel);
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
- }
-}
-
-static int
-nouveau_card_channel_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- int ret;
-
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x1000, &dev_priv->chan_vm);
- if (ret)
- return ret;
-
- ret = nouveau_channel_alloc(dev, &chan, NULL, NvDmaFB, NvDmaTT);
- dev_priv->channel = chan;
- if (ret)
- return ret;
- mutex_unlock(&dev_priv->channel->mutex);
-
- nouveau_bo_move_init(chan);
- return 0;
-}
-
static const struct vga_switcheroo_client_ops nouveau_switcheroo_ops = {
.set_gpu_state = nouveau_switcheroo_set_state,
.reprobe = nouveau_switcheroo_reprobe,
@@ -309,7 +273,7 @@ nouveau_card_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine;
- int ret, e = 0;
+ int ret;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
@@ -319,11 +283,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
- spin_lock_init(&dev_priv->channels.lock);
- spin_lock_init(&dev_priv->tile.lock);
spin_lock_init(&dev_priv->context_switch_lock);
- spin_lock_init(&dev_priv->vm_lock);
- INIT_LIST_HEAD(&dev_priv->classes);
/* Make the CRTCs and I2C buses accessible */
ret = engine->display.early_init(dev);
@@ -343,187 +303,9 @@ nouveau_card_init(struct drm_device *dev)
nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
}
- ret = nouveau_mem_vram_init(dev);
- if (ret)
- goto out_bios;
-
- ret = nouveau_mem_gart_init(dev);
- if (ret)
- goto out_ttmvram;
-
- if (!dev_priv->noaccel) {
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_fifo_create(dev);
- break;
- case NV_10:
- case NV_20:
- case NV_30:
- if (dev_priv->chipset < 0x17)
- nv10_fifo_create(dev);
- else
- nv17_fifo_create(dev);
- break;
- case NV_40:
- nv40_fifo_create(dev);
- break;
- case NV_50:
- if (dev_priv->chipset == 0x50)
- nv50_fifo_create(dev);
- else
- nv84_fifo_create(dev);
- break;
- case NV_C0:
- case NV_D0:
- nvc0_fifo_create(dev);
- break;
- case NV_E0:
- nve0_fifo_create(dev);
- break;
- default:
- break;
- }
-
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_fence_create(dev);
- break;
- case NV_10:
- case NV_20:
- case NV_30:
- case NV_40:
- case NV_50:
- if (dev_priv->chipset < 0x84)
- nv50_fence_create(dev);
- else
- nv84_fence_create(dev);
- break;
- case NV_C0:
- case NV_D0:
- case NV_E0:
- nvc0_fence_create(dev);
- break;
- default:
- break;
- }
-
- switch (dev_priv->card_type) {
- case NV_04:
- case NV_10:
- case NV_20:
- case NV_30:
- case NV_40:
- nv04_software_create(dev);
- break;
- case NV_50:
- nv50_software_create(dev);
- break;
- case NV_C0:
- case NV_D0:
- case NV_E0:
- nvc0_software_create(dev);
- break;
- default:
- break;
- }
-
- switch (dev_priv->card_type) {
- case NV_04:
- nv04_graph_create(dev);
- break;
- case NV_10:
- nv10_graph_create(dev);
- break;
- case NV_20:
- case NV_30:
- nv20_graph_create(dev);
- break;
- case NV_40:
- nv40_graph_create(dev);
- break;
- case NV_50:
- nv50_graph_create(dev);
- break;
- case NV_C0:
- case NV_D0:
- nvc0_graph_create(dev);
- break;
- case NV_E0:
- nve0_graph_create(dev);
- break;
- default:
- break;
- }
-
- switch (dev_priv->chipset) {
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0xa0:
- nv84_crypt_create(dev);
- break;
- case 0x98:
- case 0xaa:
- case 0xac:
- nv98_crypt_create(dev);
- break;
- }
-
- switch (dev_priv->card_type) {
- case NV_50:
- switch (dev_priv->chipset) {
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nva3_copy_create(dev);
- break;
- }
- break;
- case NV_C0:
- if (!(nv_rd32(dev, 0x022500) & 0x00000200))
- nvc0_copy_create(dev, 1);
- case NV_D0:
- if (!(nv_rd32(dev, 0x022500) & 0x00000100))
- nvc0_copy_create(dev, 0);
- break;
- default:
- break;
- }
-
- if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
- nv84_bsp_create(dev);
- nv84_vp_create(dev);
- nv98_ppp_create(dev);
- } else
- if (dev_priv->chipset >= 0x84) {
- nv50_mpeg_create(dev);
- nv84_bsp_create(dev);
- nv84_vp_create(dev);
- } else
- if (dev_priv->chipset >= 0x50) {
- nv50_mpeg_create(dev);
- } else
- if (dev_priv->card_type == NV_40 ||
- dev_priv->chipset == 0x31 ||
- dev_priv->chipset == 0x34 ||
- dev_priv->chipset == 0x36) {
- nv31_mpeg_create(dev);
- }
-
- for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
- if (dev_priv->eng[e]) {
- ret = dev_priv->eng[e]->init(dev, e);
- if (ret)
- goto out_engine;
- }
- }
- }
-
ret = nouveau_irq_init(dev);
if (ret)
- goto out_engine;
+ goto out_bios;
ret = nouveau_display_create(dev);
if (ret)
@@ -532,42 +314,20 @@ nouveau_card_init(struct drm_device *dev)
nouveau_backlight_init(dev);
nouveau_pm_init(dev);
- if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
- ret = nouveau_card_channel_init(dev);
- if (ret)
- goto out_pm;
- }
-
if (dev->mode_config.num_crtc) {
ret = nouveau_display_init(dev);
if (ret)
- goto out_chan;
-
- nouveau_fbcon_init(dev);
+ goto out_pm;
}
return 0;
-out_chan:
- nouveau_card_channel_fini(dev);
out_pm:
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
-out_engine:
- if (!dev_priv->noaccel) {
- for (e = e - 1; e >= 0; e--) {
- if (!dev_priv->eng[e])
- continue;
- dev_priv->eng[e]->fini(dev, e, false);
- dev_priv->eng[e]->destroy(dev,e );
- }
- }
- nouveau_mem_gart_fini(dev);
-out_ttmvram:
- nouveau_mem_vram_fini(dev);
out_bios:
nouveau_bios_takedown(dev);
out_display_early:
@@ -582,39 +342,19 @@ static void nouveau_card_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- int e;
- if (dev->mode_config.num_crtc) {
- nouveau_fbcon_fini(dev);
+ if (dev->mode_config.num_crtc)
nouveau_display_fini(dev);
- }
- nouveau_card_channel_fini(dev);
nouveau_pm_fini(dev);
nouveau_backlight_exit(dev);
nouveau_display_destroy(dev);
- if (!dev_priv->noaccel) {
- for (e = NVOBJ_ENGINE_NR - 1; e >= 0; e--) {
- if (dev_priv->eng[e]) {
- dev_priv->eng[e]->fini(dev, e, false);
- dev_priv->eng[e]->destroy(dev,e );
- }
- }
- }
-
if (dev_priv->vga_ram) {
nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
- mutex_lock(&dev->struct_mutex);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
- mutex_unlock(&dev->struct_mutex);
- nouveau_mem_gart_fini(dev);
- nouveau_mem_vram_fini(dev);
-
nouveau_bios_takedown(dev);
engine->display.late_takedown(dev);
@@ -624,56 +364,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
-int
-nouveau_open(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fpriv *fpriv;
- int ret;
-
- fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
- if (unlikely(!fpriv))
- return -ENOMEM;
-
- spin_lock_init(&fpriv->lock);
- INIT_LIST_HEAD(&fpriv->channels);
-
- if (dev_priv->card_type == NV_50) {
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
- &fpriv->vm);
- if (ret) {
- kfree(fpriv);
- return ret;
- }
- } else
- if (dev_priv->card_type >= NV_C0) {
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
- &fpriv->vm);
- if (ret) {
- kfree(fpriv);
- return ret;
- }
- }
-
- file_priv->driver_priv = fpriv;
- return 0;
-}
-
-/* here a client dies, release the stuff that was allocated for its
- * file_priv */
-void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- nouveau_channel_cleanup(dev, file_priv);
-}
-
-void
-nouveau_postclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
- nouveau_vm_ref(NULL, &fpriv->vm, NULL);
- kfree(fpriv);
-}
-
/* first module load, setup the mmio/fb mapping */
/* KMS: we need mmio at load time, not when the first drm client opens. */
int nouveau_firstopen(struct drm_device *dev)
@@ -704,55 +394,6 @@ static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
#endif
}
-static struct apertures_struct *nouveau_get_apertures(struct drm_device *dev)
-{
- struct pci_dev *pdev = dev->pdev;
- struct apertures_struct *aper = alloc_apertures(3);
- if (!aper)
- return NULL;
-
- aper->ranges[0].base = pci_resource_start(pdev, 1);
- aper->ranges[0].size = pci_resource_len(pdev, 1);
- aper->count = 1;
-
- if (pci_resource_len(pdev, 2)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
- aper->count++;
- }
-
- if (pci_resource_len(pdev, 3)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
- aper->count++;
- }
-
- return aper;
-}
-
-static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- bool primary = false;
- dev_priv->apertures = nouveau_get_apertures(dev);
- if (!dev_priv->apertures)
- return -ENOMEM;
-
-#ifdef CONFIG_X86
- primary = dev->pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
-
- remove_conflicting_framebuffers(dev_priv->apertures, "nouveaufb", primary);
- return 0;
-}
-
-void *
-nouveau_newpriv(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- return dev_priv->newpriv;
-}
-
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
struct drm_nouveau_private *dev_priv;
@@ -840,30 +481,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
- /* Determine whether we'll attempt acceleration or not, some
- * cards are disabled by default here due to them being known
- * non-functional, or never been tested due to lack of hw.
- */
- dev_priv->noaccel = !!nouveau_noaccel;
- if (nouveau_noaccel == -1) {
- switch (dev_priv->chipset) {
- case 0xd9: /* known broken */
- case 0xe4: /* needs binary driver firmware */
- case 0xe7: /* needs binary driver firmware */
- NV_INFO(dev, "acceleration disabled by default, pass "
- "noaccel=0 to force enable\n");
- dev_priv->noaccel = true;
- break;
- default:
- dev_priv->noaccel = false;
- break;
- }
- }
-
- ret = nouveau_remove_conflicting_drivers(dev);
- if (ret)
- goto err_priv;
-
nouveau_OF_copy_vbios_to_ramin(dev);
/* Special flags */
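
With the hunks above, nouveau_card_init() loses its per-chipset ladders for FIFO, fence, software and graphics engine creation; construction now happens when the new core device is built rather than in the DRM layer. As a sketch only, the fence-backend selection that was removed could be re-expressed against the nouveau_drm-based constructors declared earlier in this diff roughly as follows; the wrapper function name is illustrative, and where this logic actually lands is not shown in this section:

    static int
    nouveau_fence_backend_new(struct nouveau_drm *drm)   /* illustrative name */
    {
            /* same mapping as the deleted switch: NV04 -> nv04, pre-0x84
             * chipsets -> nv50, later NV50-family -> nv84, Fermi+ -> nvc0 */
            if (nv_device(drm->device)->card_type < NV_10)
                    return nv04_fence_create(drm);
            if (nv_device(drm->device)->chipset < 0x84)
                    return nv50_fence_create(drm);
            if (nv_device(drm->device)->card_type < NV_C0)
                    return nv84_fence_create(drm);
            return nvc0_fence_create(drm);
    }
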
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index e729535e9b26..560e816138eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -24,9 +24,13 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/instmem.h>
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
@@ -60,11 +64,10 @@ static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
struct ttm_mem_reg *mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
- struct drm_device *dev = dev_priv->dev;
-
+ struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
nouveau_mem_node_cleanup(mem->mm_node);
- nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
+ pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}
static int
@@ -73,8 +76,8 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_mem *node;
u32 size_nc = 0;
@@ -83,9 +86,9 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
size_nc = 1 << nvbo->page_shift;
- ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
- mem->page_alignment << PAGE_SHIFT, size_nc,
- (nvbo->tile_flags >> 8) & 0x3ff, &node);
+ ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
+ mem->page_alignment << PAGE_SHIFT, size_nc,
+ (nvbo->tile_flags >> 8) & 0x3ff, &node);
if (ret) {
mem->mm_node = NULL;
return (ret == -ENOSPC) ? 0 : ret;
@@ -158,11 +161,9 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
struct ttm_placement *placement,
struct ttm_mem_reg *mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_mem *node;
- if (unlikely((mem->num_pages << PAGE_SHIFT) >=
- dev_priv->gart_info.aper_size))
+ if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
return -ENOMEM;
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -188,13 +189,17 @@ const struct ttm_mem_type_manager_func nouveau_gart_manager = {
nouveau_gart_manager_debug
};
+#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
- struct drm_device *dev = dev_priv->dev;
- man->priv = nv04vm_ref(dev);
- return (man->priv != NULL) ? 0 : -ENODEV;
+ struct nouveau_drm *drm = nouveau_bdev(man->bdev);
+ struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
+ struct nv04_vmmgr_priv *priv = (void *)vmm;
+ struct nouveau_vm *vm = NULL;
+ nouveau_vm_ref(priv->vm, &vm, NULL);
+ man->priv = vm;
+ return 0;
}
static int
@@ -260,13 +265,12 @@ int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
- struct drm_nouveau_private *dev_priv =
- file_priv->minor->dev->dev_private;
+ struct nouveau_drm *drm = nouveau_newpriv(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
- return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
+ return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
static int
@@ -282,12 +286,12 @@ nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
}
int
-nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_init(struct nouveau_drm *drm)
{
struct drm_global_reference *global_ref;
int ret;
- global_ref = &dev_priv->ttm.mem_global_ref;
+ global_ref = &drm->ttm.mem_global_ref;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &nouveau_ttm_mem_global_init;
@@ -296,12 +300,12 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting\n");
- dev_priv->ttm.mem_global_ref.release = NULL;
+ drm->ttm.mem_global_ref.release = NULL;
return ret;
}
- dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
- global_ref = &dev_priv->ttm.bo_global_ref.ref;
+ drm->ttm.bo_global_ref.mem_glob = global_ref->object;
+ global_ref = &drm->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
@@ -310,8 +314,8 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM BO subsystem\n");
- drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
- dev_priv->ttm.mem_global_ref.release = NULL;
+ drm_global_item_unref(&drm->ttm.mem_global_ref);
+ drm->ttm.mem_global_ref.release = NULL;
return ret;
}
@@ -319,12 +323,105 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
}
void
-nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
+nouveau_ttm_global_release(struct nouveau_drm *drm)
{
- if (dev_priv->ttm.mem_global_ref.release == NULL)
+ if (drm->ttm.mem_global_ref.release == NULL)
return;
- drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
- drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
- dev_priv->ttm.mem_global_ref.release = NULL;
+ drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&drm->ttm.mem_global_ref);
+ drm->ttm.mem_global_ref.release = NULL;
+}
+
+int
+nouveau_ttm_init(struct nouveau_drm *drm)
+{
+ struct drm_device *dev = drm->dev;
+ u32 bits;
+ int ret;
+
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
+ bits = 40;
+ else
+ bits = 32;
+ } else {
+ bits = 32;
+ }
+
+ ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+ if (ret)
+ return ret;
+
+ ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
+ if (ret)
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+
+ ret = nouveau_ttm_global_init(drm);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_device_init(&drm->ttm.bdev,
+ drm->ttm.bo_global_ref.ref.object,
+ &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
+ bits <= 32 ? true : false);
+ if (ret) {
+ NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
+ return ret;
+ }
+
+ /* VRAM init */
+ drm->gem.vram_available = nouveau_fb(drm->device)->ram.size;
+ drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;
+
+ ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
+ drm->gem.vram_available >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
+ return ret;
+ }
+
+ drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
+ DRM_MTRR_WC);
+
+ /* GART init */
+ if (drm->agp.stat != ENABLED) {
+ drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
+ if (drm->gem.gart_available > 512 * 1024 * 1024)
+ drm->gem.gart_available = 512 * 1024 * 1024;
+ } else {
+ drm->gem.gart_available = drm->agp.size;
+ }
+
+ ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
+ drm->gem.gart_available >> PAGE_SHIFT);
+ if (ret) {
+ NV_ERROR(drm, "GART mm init failed, %d\n", ret);
+ return ret;
+ }
+
+ NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
+ NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
+ return 0;
+}
+
+void
+nouveau_ttm_fini(struct nouveau_drm *drm)
+{
+ mutex_lock(&drm->dev->struct_mutex);
+ ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
+ mutex_unlock(&drm->dev->struct_mutex);
+
+ ttm_bo_device_release(&drm->ttm.bdev);
+
+ nouveau_ttm_global_release(drm);
+
+ if (drm->ttm.mtrr >= 0) {
+ drm_mtrr_del(drm->ttm.mtrr,
+ pci_resource_start(drm->dev->pdev, 1),
+ pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
+ drm->ttm.mtrr = -1;
+ }
}
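
Editor's note: the rewritten VRAM manager above no longer calls the old flat nvfb_vram_get()/nvfb_vram_put() helpers; allocations now go through function pointers hung off the fb subdev (pfb->ram.get / pfb->ram.put). The following is a minimal, self-contained userspace sketch of that ops-table style only — all type and function names here are illustrative, not the driver's API.

#include <stdio.h>
#include <stdlib.h>

struct mem { size_t size; };

/* per-subdev operations, analogous in shape to nouveau_fb's ram.get/ram.put */
struct fb_subdev {
	int  (*get)(struct fb_subdev *, size_t size, struct mem **);
	void (*put)(struct fb_subdev *, struct mem **);
};

static int ram_get(struct fb_subdev *fb, size_t size, struct mem **pmem)
{
	struct mem *mem = malloc(sizeof(*mem));
	if (!mem)
		return -1;
	mem->size = size;
	*pmem = mem;
	return 0;
}

static void ram_put(struct fb_subdev *fb, struct mem **pmem)
{
	free(*pmem);
	*pmem = NULL;	/* callers hand back their reference, as in the patch */
}

int main(void)
{
	struct fb_subdev fb = { .get = ram_get, .put = ram_put };
	struct mem *mem = NULL;

	/* callers only ever see the subdev interface, never the backend */
	if (fb.get(&fb, 4096, &mem) == 0) {
		printf("allocated %zu bytes\n", mem->size);
		fb.put(&fb, &mem);
	}
	return 0;
}
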
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
new file mode 100644
index 000000000000..9f4d2715584b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -0,0 +1,21 @@
+#ifndef __NOUVEAU_TTM_H__
+#define __NOUVEAU_TTM_H__
+
+static inline struct nouveau_drm *
+nouveau_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct nouveau_drm, ttm.bdev);
+}
+
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+extern const struct ttm_mem_type_manager_func nouveau_gart_manager;
+extern const struct ttm_mem_type_manager_func nv04_gart_manager;
+
+struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *,
+ unsigned long size, u32 page_flags,
+ struct page *dummy_read_page);
+
+int nouveau_ttm_init(struct nouveau_drm *drm);
+void nouveau_ttm_fini(struct nouveau_drm *drm);
+
+#endif
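
Editor's note: the new nouveau_bdev() helper in the header above uses the kernel's container_of() idiom to map an embedded ttm_bo_device back to its enclosing nouveau_drm. Below is a minimal, self-contained userspace sketch of the same idiom; the structure and field names are made up for illustration and are not taken from the driver.

#include <stddef.h>
#include <stdio.h>

/* same construction as the kernel macro: subtract the member's offset
 * from the member pointer to recover the containing structure */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bo_device { int dummy; };

struct drm_private {
	int id;
	struct bo_device bdev;	/* embedded, like ttm.bdev in nouveau_drm */
};

static struct drm_private *to_private(struct bo_device *bd)
{
	return container_of(bd, struct drm_private, bdev);
}

int main(void)
{
	struct drm_private priv = { .id = 42 };

	/* given only a pointer to the embedded member, recover the parent */
	printf("id = %d\n", to_private(&priv.bdev)->id);
	return 0;
}
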
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
deleted file mode 100644
index 6bff634c95fe..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2010 Nouveau Project
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/ratelimit.h>
-#include "nouveau_util.h"
-
-#include <core/enum.h>
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-int
-nouveau_ratelimit(void)
-{
- return __ratelimit(&nouveau_ratelimit_state);
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
deleted file mode 100644
index 114293758f8c..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_util.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2010 Nouveau Project
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __NOUVEAU_UTIL_H__
-#define __NOUVEAU_UTIL_H__
-
-#include <core/enum.h>
-
-int nouveau_ratelimit(void);
-
-#endif
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 81947ea0f82d..6ab936376c40 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,9 +32,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
-static void nv04_vblank_crtc0_isr(struct drm_device *);
-static void nv04_vblank_crtc1_isr(struct drm_device *);
-
int
nv04_display_early_init(struct drm_device *dev)
{
@@ -126,8 +123,6 @@ nv04_display_create(struct drm_device *dev)
func->save(encoder);
}
- nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
- nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
return 0;
}
@@ -141,9 +136,6 @@ nv04_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- nouveau_irq_unregister(dev, 24);
- nouveau_irq_unregister(dev, 25);
-
/* Turn every CRTC off. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct drm_mode_set modeset = {
@@ -203,17 +195,3 @@ nv04_display_fini(struct drm_device *dev)
if (nv_two_heads(dev))
NVWriteCRTC(dev, 1, NV_PCRTC_INTR_EN_0, 0);
}
-
-static void
-nv04_vblank_crtc0_isr(struct drm_device *dev)
-{
- nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
- drm_handle_vblank(dev, 0);
-}
-
-static void
-nv04_vblank_crtc1_isr(struct drm_device *dev)
-{
- nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
- drm_handle_vblank(dev, 1);
-}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 06fb68acf813..35480b6776f8 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -22,19 +22,18 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <core/object.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fbcon.h"
int
nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, 4);
@@ -53,9 +52,8 @@ int
nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, 7);
@@ -81,9 +79,8 @@ int
nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
uint32_t fg;
uint32_t bg;
uint32_t dsize;
@@ -142,9 +139,10 @@ nv04_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
- const int sub = NvSubCtxSurf2D;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_channel *chan = drm->channel;
+ struct nouveau_device *device = nv_device(drm->device);
+ struct nouveau_object *object;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -176,31 +174,35 @@ nv04_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
- dev_priv->card_type >= NV_10 ?
- 0x0062 : 0x0042);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvCtxSurf2D,
+ device->card_type >= NV_10 ? 0x0062 : 0x0042,
+ NULL, 0, &object);
if (ret)
return ret;
- ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvClipRect,
+ 0x0019, NULL, 0, &object);
if (ret)
return ret;
- ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvRop,
+ 0x0043, NULL, 0, &object);
if (ret)
return ret;
- ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImagePatt,
+ 0x0044, NULL, 0, &object);
if (ret)
return ret;
- ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvGdiRect,
+ 0x004a, NULL, 0, &object);
if (ret)
return ret;
- ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
- dev_priv->chipset >= 0x11 ?
- 0x009f : 0x005f);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, NvImageBlit,
+ device->chipset >= 0x11 ? 0x009f : 0x005f,
+ NULL, 0, &object);
if (ret)
return ret;
@@ -209,25 +211,25 @@ nv04_fbcon_accel_init(struct fb_info *info)
return 0;
}
- BEGIN_NV04(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_NV04(chan, sub, 0x0184, 2);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_NV04(chan, sub, 0x0300, 4);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_NV04(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_NV04(chan, sub, 0x0300, 1);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_NV04(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_NV04(chan, sub, 0x0300, 8);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -241,9 +243,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_NV04(chan, sub, 0x0000, 1);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_NV04(chan, sub, 0x0300, 2);
+ BEGIN_NV04(chan, NvSubCtxSurf2D, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index 1b45a4f8c0a5..a220b94ba9f2 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -22,15 +22,14 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <engine/fifo.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fence.h"
struct nv04_fence_chan {
struct nouveau_fence_chan base;
- atomic_t sequence;
};
struct nv04_fence_priv {
@@ -57,19 +56,11 @@ nv04_fence_sync(struct nouveau_fence *fence,
return -ENODEV;
}
-int
-nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv04_fence_chan *fctx = chan->fence;
- atomic_set(&fctx->sequence, data);
- return 0;
-}
-
static u32
nv04_fence_read(struct nouveau_channel *chan)
{
- struct nv04_fence_chan *fctx = chan->fence;
- return atomic_read(&fctx->sequence);
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ return atomic_read(&fifo->refcnt);
}
static void
@@ -87,7 +78,6 @@ nv04_fence_context_new(struct nouveau_channel *chan)
struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (fctx) {
nouveau_fence_context_new(&fctx->base);
- atomic_set(&fctx->sequence, 0);
chan->fence = fctx;
return 0;
}
@@ -95,23 +85,19 @@ nv04_fence_context_new(struct nouveau_channel *chan)
}
static void
-nv04_fence_destroy(struct drm_device *dev)
+nv04_fence_destroy(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fence_priv *priv = dev_priv->fence.func;
-
- dev_priv->fence.func = NULL;
+ struct nv04_fence_priv *priv = drm->fence;
+ drm->fence = NULL;
kfree(priv);
}
int
-nv04_fence_create(struct drm_device *dev)
+nv04_fence_create(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv04_fence_priv *priv;
- int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -121,6 +107,5 @@ nv04_fence_create(struct drm_device *dev)
priv->base.emit = nv04_fence_emit;
priv->base.sync = nv04_fence_sync;
priv->base.read = nv04_fence_read;
- dev_priv->fence.func = &priv->base;
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
deleted file mode 100644
index ceeb868c7c29..000000000000
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_fence.h"
-#include "nouveau_software.h"
-#include "nouveau_hw.h"
-
-struct nv04_software_priv {
- struct nouveau_software_priv base;
-};
-
-struct nv04_software_chan {
- struct nouveau_software_chan base;
-};
-
-static int
-mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv04_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- return pch->base.flip(pch->base.flip_data);
-}
-
-static int
-nv04_software_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv04_software_chan *pch;
-
- pch = kzalloc(sizeof(*pch), GFP_KERNEL);
- if (!pch)
- return -ENOMEM;
-
- nouveau_software_context_new(chan, &pch->base);
- chan->engctx[engine] = pch;
- return 0;
-}
-
-static void
-nv04_software_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv04_software_chan *pch = chan->engctx[engine];
- chan->engctx[engine] = NULL;
- kfree(pch);
-}
-
-static int
-nv04_software_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
- if (ret)
- return ret;
- obj->engine = 0;
- obj->class = class;
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv04_software_init(struct drm_device *dev, int engine)
-{
- return 0;
-}
-
-static int
-nv04_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
- return 0;
-}
-
-static void
-nv04_software_destroy(struct drm_device *dev, int engine)
-{
- struct nv04_software_priv *psw = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, SW);
- kfree(psw);
-}
-
-int
-nv04_software_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_software_priv *psw;
-
- psw = kzalloc(sizeof(*psw), GFP_KERNEL);
- if (!psw)
- return -ENOMEM;
-
- psw->base.base.destroy = nv04_software_destroy;
- psw->base.base.init = nv04_software_init;
- psw->base.base.fini = nv04_software_fini;
- psw->base.base.context_new = nv04_software_context_new;
- psw->base.base.context_del = nv04_software_context_del;
- psw->base.base.object_new = nv04_software_object_new;
- nouveau_software_create(&psw->base);
-
- NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
- if (dev_priv->card_type <= NV_04) {
- NVOBJ_CLASS(dev, 0x006e, SW);
- NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
- NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
- } else {
- NVOBJ_CLASS(dev, 0x016e, SW);
- NVOBJ_MTHD (dev, 0x016e, 0x0500, mthd_flip);
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index dd7f17dd9903..ce752bf5cc4e 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -22,10 +22,11 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fence.h"
struct nv10_fence_chan {
@@ -64,12 +65,11 @@ int
nv17_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nv10_fence_priv *priv = dev_priv->fence.func;
+ struct nv10_fence_priv *priv = chan->drm->fence;
u32 value;
int ret;
- if (!mutex_trylock(&prev->mutex))
+ if (!mutex_trylock(&prev->cli->mutex))
return -EBUSY;
spin_lock(&priv->lock);
@@ -96,14 +96,14 @@ nv17_fence_sync(struct nouveau_fence *fence,
FIRE_RING (chan);
}
- mutex_unlock(&prev->mutex);
+ mutex_unlock(&prev->cli->mutex);
return 0;
}
u32
nv10_fence_read(struct nouveau_channel *chan)
{
- return nvchan_rd32(chan, 0x0048);
+ return nv_ro32(chan->object, 0x0048);
}
void
@@ -118,10 +118,8 @@ nv10_fence_context_del(struct nouveau_channel *chan)
static int
nv10_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nv10_fence_priv *priv = dev_priv->fence.func;
+ struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
- struct nouveau_gpuobj *obj;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -132,15 +130,19 @@ nv10_fence_context_new(struct nouveau_channel *chan)
if (priv->bo) {
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- mem->start * PAGE_SIZE, mem->size,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (!ret) {
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- }
+ struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = mem->start + mem->size - 1;
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = start,
+ .limit = limit,
+ }, sizeof(struct nv_dma_class),
+ &object);
}
if (ret)
@@ -149,24 +151,22 @@ nv10_fence_context_new(struct nouveau_channel *chan)
}
void
-nv10_fence_destroy(struct drm_device *dev)
+nv10_fence_destroy(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fence_priv *priv = dev_priv->fence.func;
-
+ struct nv10_fence_priv *priv = drm->fence;
+ nouveau_bo_unmap(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
- dev_priv->fence.func = NULL;
+ drm->fence = NULL;
kfree(priv);
}
int
-nv10_fence_create(struct drm_device *dev)
+nv10_fence_create(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv10_fence_priv *priv;
int ret = 0;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -176,11 +176,10 @@ nv10_fence_create(struct drm_device *dev)
priv->base.emit = nv10_fence_emit;
priv->base.read = nv10_fence_read;
priv->base.sync = nv10_fence_sync;
- dev_priv->fence.func = &priv->base;
spin_lock_init(&priv->lock);
- if (dev_priv->chipset >= 0x17) {
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ if (nv_device(drm->device)->chipset >= 0x17) {
+ ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -197,6 +196,6 @@ nv10_fence_create(struct drm_device *dev)
}
if (ret)
- nv10_fence_destroy(dev);
+ nv10_fence_destroy(drm);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index d857525666ee..93f536de3779 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -27,7 +27,6 @@
#include <nouveau_bios.h>
#include "nouveau_pm.h"
#include "nouveau_hw.h"
-#include <engine/fifo.h>
#define min2(a,b) ((a) < (b) ? (a) : (b))
@@ -259,7 +258,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
if (!nv_wait(dev, 0x003220, 0x00000010, 0x00000000))
goto resume;
nv_mask(dev, 0x003200, 0x00000001, 0x00000000);
- nv04_fifo_cache_pull(dev, false);
+ //XXX: nv04_fifo_cache_pull(dev, false);
if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
goto resume;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 83419a2daa0b..ae72f7656106 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -31,7 +31,6 @@
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
-#include <core/ramht.h>
#include "drm_crtc_helper.h"
#include "nouveau_fence.h"
@@ -102,17 +101,17 @@ nv50_display_sync(struct drm_device *dev)
BEGIN_NV04(evo, 0, 0x0084, 1);
OUT_RING (evo, 0x00000000);
- nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ nv_wo32(disp->ramin, 0x2000, 0x00000000);
FIRE_RING (evo);
start = nv_timer_read(dev);
do {
- if (nv_ro32(disp->ntfy, 0x000))
+ if (nv_ro32(disp->ramin, 0x2000))
return 0;
} while (nv_timer_read(dev) - start < 2000000000ULL);
}
- return -EBUSY;
+ return 0;
}
int
@@ -217,7 +216,7 @@ nv50_display_init(struct drm_device *dev)
return ret;
evo = nv50_display(dev)->master;
- nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->addr >> 8) | 9);
+ nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
ret = RING_SPACE(evo, 3);
if (ret)
@@ -444,7 +443,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
if (dev_priv->chipset < 0x84)
OUT_RING (chan, NvSema);
else
- OUT_RING (chan, chan->vram_handle);
+ OUT_RING (chan, chan->vram);
} else {
u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
offset += dispc->sem.offset;
@@ -598,48 +597,6 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
}
static void
-nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_software_priv *psw = nv_engine(dev, NVOBJ_ENGINE_SW);
- struct nouveau_software_chan *pch, *tmp;
-
- list_for_each_entry_safe(pch, tmp, &psw->vblank, vblank.list) {
- if (pch->vblank.head != crtc)
- continue;
-
- spin_lock(&psw->peephole_lock);
- nv_wr32(dev, 0x001704, pch->vblank.channel);
- nv_wr32(dev, 0x001710, 0x80000000 | pch->vblank.ctxdma);
- if (dev_priv->chipset == 0x50) {
- nv_wr32(dev, 0x001570, pch->vblank.offset);
- nv_wr32(dev, 0x001574, pch->vblank.value);
- } else {
- nv_wr32(dev, 0x060010, pch->vblank.offset);
- nv_wr32(dev, 0x060014, pch->vblank.value);
- }
- spin_unlock(&psw->peephole_lock);
-
- list_del(&pch->vblank.list);
- drm_vblank_put(dev, crtc);
- }
-
- drm_handle_vblank(dev, crtc);
-}
-
-static void
-nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
-{
- if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
- nv50_display_vblank_crtc_handler(dev, 0);
-
- if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
- nv50_display_vblank_crtc_handler(dev, 1);
-
- nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
-}
-
-static void
nv50_display_unk10_handler(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -978,8 +935,8 @@ nv50_display_isr(struct drm_device *dev)
}
if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
- nv50_display_vblank_handler(dev, intr1);
intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
+ delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
}
clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 25c301391724..ef12a7afac9c 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -33,7 +33,6 @@
#include "nouveau_dma.h"
#include "nouveau_reg.h"
#include "nouveau_crtc.h"
-#include "nouveau_software.h"
#include "nv50_evo.h"
struct nv50_display_crtc {
@@ -47,7 +46,10 @@ struct nv50_display_crtc {
struct nv50_display {
struct nouveau_channel *master;
- struct nouveau_gpuobj *ntfy;
+
+ struct nouveau_gpuobj *ramin;
+ u32 dmao;
+ u32 hash;
struct nv50_display_crtc crtc[2];
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index 7e9a6d6d673b..d7d8080c6a14 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -26,9 +26,22 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nv50_display.h"
+static u32
+nv50_evo_rd32(struct nouveau_object *object, u32 addr)
+{
+ void __iomem *iomem = object->oclass->ofuncs->rd08;
+ return ioread32_native(iomem + addr);
+}
+
+static void
+nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
+{
+ void __iomem *iomem = object->oclass->ofuncs->rd08;
+ iowrite32_native(data, iomem + addr);
+}
+
static void
nv50_evo_channel_del(struct nouveau_channel **pevo)
{
@@ -38,21 +51,24 @@ nv50_evo_channel_del(struct nouveau_channel **pevo)
return;
*pevo = NULL;
- nouveau_ramht_ref(NULL, &evo->ramht, evo);
- nouveau_gpuobj_channel_takedown(evo);
- nouveau_bo_unmap(evo->pushbuf_bo);
- nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+ nouveau_bo_unmap(evo->push.buffer);
+ nouveau_bo_ref(NULL, &evo->push.buffer);
- if (evo->user)
- iounmap(evo->user);
+ if (evo->object)
+ iounmap(evo->object->oclass->ofuncs);
kfree(evo);
}
-void
-nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size)
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
+ u64 base, u64 size, struct nouveau_gpuobj **pobj)
{
- struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+ struct drm_device *dev = evo->fence;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_display *disp = nv50_display(dev);
+ u32 dmao = disp->dmao;
+ u32 hash = disp->hash;
u32 flags5;
if (dev_priv->chipset < 0xc0) {
@@ -67,36 +83,21 @@ nv50_evo_dmaobj_init(struct nouveau_gpuobj *obj, u32 memtype, u64 base, u64 size
flags5 = 0x00020000;
}
- nv50_gpuobj_dma_init(obj, 0, 0x3d, base, size, NV_MEM_TARGET_VRAM,
- NV_MEM_ACCESS_RW, (memtype >> 8) & 0xff, 0);
- nv_wo32(obj, 0x14, flags5);
- nvimem_flush(obj->dev);
-}
+ nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
+ nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
+ nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
+ nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
+ upper_32_bits(base));
+ nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
+ nv_wo32(disp->ramin, dmao + 0x14, flags5);
-int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
- u64 base, u64 size, struct nouveau_gpuobj **pobj)
-{
- struct nv50_display *disp = nv50_display(evo->dev);
- struct nouveau_gpuobj *obj = NULL;
- int ret;
+ nv_wo32(disp->ramin, hash + 0x00, handle);
+ nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
+ evo->handle);
- ret = nouveau_gpuobj_new(evo->dev, disp->master, 6*4, 32, 0, &obj);
- if (ret)
- return ret;
- obj->engine = NVOBJ_ENGINE_DISPLAY;
-
- nv50_evo_dmaobj_init(obj, memtype, base, size);
-
- ret = nouveau_ramht_insert(evo, handle, obj);
- if (ret)
- goto out;
-
- if (pobj)
- nouveau_gpuobj_ref(obj, pobj);
-out:
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
+ disp->dmao += 0x20;
+ disp->hash += 0x08;
+ return 0;
}
static int
@@ -112,49 +113,52 @@ nv50_evo_channel_new(struct drm_device *dev, int chid,
return -ENOMEM;
*pevo = evo;
- evo->id = chid;
- evo->dev = dev;
+ evo->handle = chid;
+ evo->fence = dev;
evo->user_get = 4;
evo->user_put = 0;
ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
- &evo->pushbuf_bo);
+ &evo->push.buffer);
if (ret == 0)
- ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
if (ret) {
NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
nv50_evo_channel_del(pevo);
return ret;
}
- ret = nouveau_bo_map(evo->pushbuf_bo);
+ ret = nouveau_bo_map(evo->push.buffer);
if (ret) {
NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
nv50_evo_channel_del(pevo);
return ret;
}
- evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
- if (!evo->user) {
- NV_ERROR(dev, "Error mapping EVO control regs.\n");
- nv50_evo_channel_del(pevo);
- return -ENOMEM;
- }
-
- /* bind primary evo channel's ramht to the channel */
- if (disp->master && evo != disp->master)
- nouveau_ramht_ref(disp->master->ramht, &evo->ramht, NULL);
-
+ evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
+#ifdef NOUVEAU_OBJECT_MAGIC
+ evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
+#endif
+ evo->object->parent = nv_object(disp->ramin)->parent;
+ evo->object->engine = nv_object(disp->ramin)->engine;
+ evo->object->oclass =
+ kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
+ evo->object->oclass->ofuncs =
+ kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
+ evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
+ evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
+ evo->object->oclass->ofuncs->rd08 =
+ ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
return 0;
}
static int
nv50_evo_channel_init(struct nouveau_channel *evo)
{
- struct drm_device *dev = evo->dev;
- int id = evo->id, ret, i;
- u64 pushbuf = evo->pushbuf_bo->bo.offset;
+ struct drm_device *dev = evo->fence;
+ int id = evo->handle, ret, i;
+ u64 pushbuf = evo->push.buffer->bo.offset;
u32 tmp;
tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
@@ -205,8 +209,8 @@ nv50_evo_channel_init(struct nouveau_channel *evo)
static void
nv50_evo_channel_fini(struct nouveau_channel *evo)
{
- struct drm_device *dev = evo->dev;
- int id = evo->id;
+ struct drm_device *dev = evo->fence;
+ int id = evo->handle;
nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
@@ -231,8 +235,8 @@ nv50_evo_destroy(struct drm_device *dev)
}
nv50_evo_channel_del(&disp->crtc[i].sync);
}
- nouveau_gpuobj_ref(NULL, &disp->ntfy);
nv50_evo_channel_del(&disp->master);
+ nouveau_gpuobj_ref(NULL, &disp->ramin);
}
int
@@ -240,55 +244,33 @@ nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
- struct nouveau_gpuobj *ramht = NULL;
struct nouveau_channel *evo;
int ret, i, j;
- /* create primary evo channel, the one we use for modesetting
- * purporses
- */
- ret = nv50_evo_channel_new(dev, 0, &disp->master);
- if (ret)
- return ret;
- evo = disp->master;
-
/* setup object management on it, any other evo channel will
* use this also as there's no per-channel support on the
* hardware
*/
ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
- NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+ NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
if (ret) {
NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
goto err;
}
- ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
- if (ret) {
- NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
- goto err;
- }
-
- ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret)
- goto err;
+ disp->hash = 0x0000;
+ disp->dmao = 0x1000;
- /* not sure exactly what this is..
- *
- * the first dword of the structure is used by nvidia to wait on
- * full completion of an EVO "update" command.
- *
- * method 0x8c on the master evo channel will fill a lot more of
- * this structure with some undefined info
+ /* create primary evo channel, the one we use for modesetting
+	 * purposes
*/
- ret = nouveau_gpuobj_new(dev, disp->master, 0x1000, 0,
- NVOBJ_FLAG_ZERO_ALLOC, &disp->ntfy);
+ ret = nv50_evo_channel_new(dev, 0, &disp->master);
if (ret)
- goto err;
+ return ret;
+ evo = disp->master;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
- disp->ntfy->addr, disp->ntfy->size, NULL);
+ disp->ramin->addr + 0x2000, 0x1000, NULL);
if (ret)
goto err;
@@ -304,13 +286,13 @@ nv50_evo_create(struct drm_device *dev)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
- (dev_priv->chipset < 0xc0 ? 0x7a00 : 0xfe00),
+ (dev_priv->chipset < 0xc0 ? 0x7a : 0xfe),
0, nvfb_vram_size(dev), NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
- (dev_priv->chipset < 0xc0 ? 0x7000 : 0xfe00),
+ (dev_priv->chipset < 0xc0 ? 0x70 : 0xfe),
0, nvfb_vram_size(dev), NULL);
if (ret)
goto err;
@@ -352,14 +334,14 @@ nv50_evo_create(struct drm_device *dev)
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
(dev_priv->chipset < 0xc0 ?
- 0x7a00 : 0xfe00),
+ 0x7a : 0xfe),
0, nvfb_vram_size(dev), NULL);
if (ret)
goto err;
ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
(dev_priv->chipset < 0xc0 ?
- 0x7000 : 0xfe00),
+ 0x70 : 0xfe),
0, nvfb_vram_size(dev), NULL);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 1593e2402fd2..2028a4447124 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -22,20 +22,16 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fbcon.h"
-#include <core/mm.h>
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
@@ -156,10 +150,11 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct drm_device *dev = nfbdev->dev;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_channel *chan = drm->channel;
+ struct nouveau_object *object;
int ret, format;
switch (info->var.bits_per_pixel) {
@@ -189,7 +184,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
return -EINVAL;
}
- ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+ 0x502d, NULL, 0, &object);
if (ret)
return ret;
@@ -202,9 +198,9 @@ nv50_fbcon_accel_init(struct fb_info *info)
BEGIN_NV04(chan, NvSub2D, 0x0000, 1);
OUT_RING(chan, Nv2D);
BEGIN_NV04(chan, NvSub2D, 0x0184, 3);
- OUT_RING(chan, chan->vram_handle);
- OUT_RING(chan, chan->vram_handle);
- OUT_RING(chan, chan->vram_handle);
+ OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, NvDmaFB);
+ OUT_RING(chan, NvDmaFB);
BEGIN_NV04(chan, NvSub2D, 0x0290, 1);
OUT_RING(chan, 0);
BEGIN_NV04(chan, NvSub2D, 0x0888, 1);
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 10aa04f26b83..e717aaaf62c6 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -22,12 +22,12 @@
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include <core/object.h>
+#include <core/class.h>
+
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fence.h"
-#include "nv50_display.h"
struct nv50_fence_chan {
struct nouveau_fence_chan base;
@@ -43,12 +43,11 @@ struct nv50_fence_priv {
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nv50_fence_priv *priv = dev_priv->fence.func;
+ struct nv50_fence_priv *priv = chan->drm->fence;
struct nv50_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
- struct nouveau_gpuobj *obj;
- int ret = 0, i;
+ struct nouveau_object *object;
+ int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
if (!fctx)
@@ -56,30 +55,29 @@ nv50_fence_context_new(struct nouveau_channel *chan)
nouveau_fence_context_new(&fctx->base);
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- mem->start * PAGE_SIZE, mem->size,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (!ret) {
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- }
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = mem->start * PAGE_SIZE,
+ .limit = mem->size - 1,
+ }, sizeof(struct nv_dma_class),
+ &object);
/* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
- struct nv50_display *pdisp = nv50_display(chan->dev);
- struct nv50_display_crtc *dispc = &pdisp->crtc[i];
- struct nouveau_gpuobj *obj = NULL;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- dispc->sem.bo->bo.offset, 0x1000,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret)
- break;
-
- ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
- nouveau_gpuobj_ref(NULL, &obj);
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50sema(chan->drm->dev, i);
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvEvoSema0 + i, 0x003d,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = bo->bo.offset,
+ .limit = bo->bo.offset + 0xfff,
+ }, sizeof(struct nv_dma_class),
+ &object);
}
if (ret)
@@ -88,13 +86,12 @@ nv50_fence_context_new(struct nouveau_channel *chan)
}
int
-nv50_fence_create(struct drm_device *dev)
+nv50_fence_create(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_fence_priv *priv;
int ret = 0;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -104,10 +101,9 @@ nv50_fence_create(struct drm_device *dev)
priv->base.emit = nv10_fence_emit;
priv->base.read = nv10_fence_read;
priv->base.sync = nv17_fence_sync;
- dev_priv->fence.func = &priv->base;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
0, 0x0000, NULL, &priv->bo);
if (!ret) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
@@ -117,9 +113,12 @@ nv50_fence_create(struct drm_device *dev)
nouveau_bo_ref(NULL, &priv->bo);
}
- if (ret == 0)
+ if (ret == 0) {
nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
- else
- nv10_fence_destroy(dev);
+ priv->base.sync = nv17_fence_sync;
+ }
+
+ if (ret)
+ nv10_fence_destroy(drm);
return ret;
}
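
Editor's note: the converted allocations in this file (and throughout the patch) pass class-specific arguments to nouveau_object_new() as a C99 compound literal with designated initializers, e.g. &(struct nv_dma_class){ ... } plus its sizeof. A small, self-contained sketch of that calling pattern follows; the names below are stand-ins, not the driver's real types.

#include <stdio.h>
#include <stddef.h>

/* stand-in for a per-class argument structure */
struct dma_args {
	unsigned flags;
	unsigned long long start;
	unsigned long long limit;
};

/* stand-in for an object constructor taking opaque class data */
static int object_new(const char *name, const void *data, size_t size)
{
	const struct dma_args *args = data;

	if (size != sizeof(*args))
		return -1;
	printf("%s: flags=%#x range=[%#llx, %#llx]\n",
	       name, args->flags, args->start, args->limit);
	return 0;
}

int main(void)
{
	/* the compound literal lives only for the duration of the call,
	 * so no separate temporary has to be declared and filled in */
	return object_new("NvSema",
			  &(struct dma_args) {
				.flags = 0x3,
				.start = 0x1000,
				.limit = 0x1fff,
			  }, sizeof(struct dma_args));
}
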
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 142cd4e83767..ac0208438ace 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -28,7 +28,6 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
#include "nouveau_hwsq.h"
-#include "nv50_display.h"
enum clk_src {
clk_src_crystal,
diff --git a/drivers/gpu/drm/nouveau/nv50_software.c b/drivers/gpu/drm/nouveau/nv50_software.c
deleted file mode 100644
index 7c9dbe862c44..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_software.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nv50_software_priv {
- struct nouveau_software_priv base;
-};
-
-struct nv50_software_chan {
- struct nouveau_software_chan base;
-};
-
-static int
-mthd_dma_vblsem(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- struct nouveau_gpuobj *gpuobj;
-
- gpuobj = nouveau_ramht_find(chan, data);
- if (!gpuobj)
- return -ENOENT;
-
- pch->base.vblank.ctxdma = gpuobj->node->offset >> 4;
- return 0;
-}
-
-static int
-mthd_vblsem_offset(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- pch->base.vblank.offset = data;
- return 0;
-}
-
-static int
-mthd_vblsem_value(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- pch->base.vblank.value = data;
- return 0;
-}
-
-static int
-mthd_vblsem_release(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv50_software_priv *psw = nv_engine(chan->dev, NVOBJ_ENGINE_SW);
- struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- struct drm_device *dev = chan->dev;
-
- if (data > 1)
- return -EINVAL;
-
- drm_vblank_get(dev, data);
-
- pch->base.vblank.head = data;
- list_add(&pch->base.vblank.list, &psw->base.vblank);
- return 0;
-}
-
-static int
-mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct nv50_software_chan *pch = chan->engctx[NVOBJ_ENGINE_SW];
- return pch->base.flip(pch->base.flip_data);
-}
-
-static int
-nv50_software_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv50_software_chan *pch;
-
- pch = kzalloc(sizeof(*pch), GFP_KERNEL);
- if (!pch)
- return -ENOMEM;
-
- nouveau_software_context_new(chan, &pch->base);
- pch->base.vblank.channel = chan->ramin->addr >> 12;
- chan->engctx[engine] = pch;
- return 0;
-}
-
-static void
-nv50_software_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv50_software_chan *pch = chan->engctx[engine];
- chan->engctx[engine] = NULL;
- kfree(pch);
-}
-
-static int
-nv50_software_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, 0, &obj);
- if (ret)
- return ret;
- obj->engine = 0;
- obj->class = class;
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv50_software_init(struct drm_device *dev, int engine)
-{
- return 0;
-}
-
-static int
-nv50_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
- return 0;
-}
-
-static void
-nv50_software_destroy(struct drm_device *dev, int engine)
-{
- struct nv50_software_priv *psw = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, SW);
- kfree(psw);
-}
-
-int
-nv50_software_create(struct drm_device *dev)
-{
- struct nv50_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
- if (!psw)
- return -ENOMEM;
-
- psw->base.base.destroy = nv50_software_destroy;
- psw->base.base.init = nv50_software_init;
- psw->base.base.fini = nv50_software_fini;
- psw->base.base.context_new = nv50_software_context_new;
- psw->base.base.context_del = nv50_software_context_del;
- psw->base.base.object_new = nv50_software_object_new;
- nouveau_software_create(&psw->base);
-
- NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
- NVOBJ_CLASS(dev, 0x506e, SW);
- NVOBJ_MTHD (dev, 0x506e, 0x018c, mthd_dma_vblsem);
- NVOBJ_MTHD (dev, 0x506e, 0x0400, mthd_vblsem_offset);
- NVOBJ_MTHD (dev, 0x506e, 0x0404, mthd_vblsem_value);
- NVOBJ_MTHD (dev, 0x506e, 0x0408, mthd_vblsem_release);
- NVOBJ_MTHD (dev, 0x506e, 0x0500, mthd_flip);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 5ef87edb878d..b0d147a675c4 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -22,13 +22,14 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include <core/object.h>
+#include <core/class.h>
+
#include <engine/fifo.h>
-#include <core/ramht.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
#include "nouveau_fence.h"
-#include "nv50_display.h"
struct nv84_fence_chan {
struct nouveau_fence_chan base;
@@ -43,13 +44,14 @@ static int
nv84_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, NvSema);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(chan->id * 16));
- OUT_RING (chan, lower_32_bits(chan->id * 16));
+ OUT_RING (chan, upper_32_bits(fifo->chid * 16));
+ OUT_RING (chan, lower_32_bits(fifo->chid * 16));
OUT_RING (chan, fence->sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
FIRE_RING (chan);
@@ -62,13 +64,14 @@ static int
nv84_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
+ struct nouveau_fifo_chan *fifo = (void *)prev->object;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
OUT_RING (chan, NvSema);
BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
- OUT_RING (chan, upper_32_bits(prev->id * 16));
- OUT_RING (chan, lower_32_bits(prev->id * 16));
+ OUT_RING (chan, upper_32_bits(fifo->chid * 16));
+ OUT_RING (chan, lower_32_bits(fifo->chid * 16));
OUT_RING (chan, fence->sequence);
OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
FIRE_RING (chan);
@@ -79,9 +82,9 @@ nv84_fence_sync(struct nouveau_fence *fence,
static u32
nv84_fence_read(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nv84_fence_priv *priv = dev_priv->fence.func;
- return nv_ro32(priv->mem, chan->id * 16);
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nv84_fence_priv *priv = chan->drm->fence;
+ return nv_ro32(priv->mem, fifo->chid * 16);
}
static void
@@ -96,10 +99,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
static int
nv84_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nv84_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nv84_fence_priv *priv = chan->drm->fence;
struct nv84_fence_chan *fctx;
- struct nouveau_gpuobj *obj;
+ struct nouveau_object *object;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -108,58 +111,56 @@ nv84_fence_context_new(struct nouveau_channel *chan)
nouveau_fence_context_new(&fctx->base);
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
- priv->mem->addr, priv->mem->size,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret == 0) {
- ret = nouveau_ramht_insert(chan, NvSema, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- nv_wo32(priv->mem, chan->id * 16, 0x00000000);
- }
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvSema, 0x0002,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = priv->mem->addr,
+ .limit = priv->mem->addr +
+ priv->mem->size - 1,
+ }, sizeof(struct nv_dma_class),
+ &object);
/* dma objects for display sync channel semaphore blocks */
- for (i = 0; i < chan->dev->mode_config.num_crtc; i++) {
- struct nv50_display *pdisp = nv50_display(chan->dev);
- struct nv50_display_crtc *dispc = &pdisp->crtc[i];
- struct nouveau_gpuobj *obj = NULL;
-
- ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- dispc->sem.bo->bo.offset, 0x1000,
- NV_MEM_ACCESS_RW,
- NV_MEM_TARGET_VRAM, &obj);
- if (ret)
- break;
-
- ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, obj);
- nouveau_gpuobj_ref(NULL, &obj);
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nv50sema(chan->drm->dev, i);
+
+ ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
+ NvEvoSema0 + i, 0x003d,
+ &(struct nv_dma_class) {
+ .flags = NV_DMA_TARGET_VRAM |
+ NV_DMA_ACCESS_RDWR,
+ .start = bo->bo.offset,
+ .limit = bo->bo.offset + 0xfff,
+ }, sizeof(struct nv_dma_class),
+ &object);
}
if (ret)
nv84_fence_context_del(chan);
+ nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
return ret;
}
static void
-nv84_fence_destroy(struct drm_device *dev)
+nv84_fence_destroy(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fence_priv *priv = dev_priv->fence.func;
-
+ struct nv84_fence_priv *priv = drm->fence;
nouveau_gpuobj_ref(NULL, &priv->mem);
- dev_priv->fence.func = NULL;
+ drm->fence = NULL;
kfree(priv);
}
int
-nv84_fence_create(struct drm_device *dev)
+nv84_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nv84_fence_priv *priv;
+ u32 chan = pfifo->max + 1;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -169,15 +170,10 @@ nv84_fence_create(struct drm_device *dev)
priv->base.emit = nv84_fence_emit;
priv->base.sync = nv84_fence_sync;
priv->base.read = nv84_fence_read;
- dev_priv->fence.func = priv;
-
- ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
- 0x1000, 0, &priv->mem);
- if (ret)
- goto out;
-out:
+ ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0,
+ &priv->mem);
if (ret)
- nv84_fence_destroy(dev);
+ nv84_fence_destroy(drm);
return ret;
}
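
The nv84_fence.c changes above replace the old nouveau_gpuobj_dma_new() plus nouveau_ramht_insert() pair with a single nouveau_object_new() call that takes an inline struct nv_dma_class, and they index the per-channel semaphore slots by fifo->chid instead of chan->id. Note how .limit is filled with priv->mem->addr + priv->mem->size - 1 and bo->bo.offset + 0xfff, i.e. it looks like an inclusive end address where the old call took a size. A minimal standalone sketch of that window arithmetic follows; the dma_window helper and the example addresses are hypothetical and only illustrate the start/limit convention, they are not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Inclusive-limit DMA window, mirroring how struct nv_dma_class is filled
 * in nv84_fence_context_new() above: limit = start + size - 1. */
struct dma_window {
	uint64_t start;
	uint64_t limit;
};

static struct dma_window dma_window(uint64_t start, uint64_t size)
{
	return (struct dma_window){ .start = start, .limit = start + size - 1 };
}

int main(void)
{
	/* 16 bytes of fence state per FIFO channel, sized like priv->mem */
	unsigned channels = 128;	/* assumption: pfifo->max + 1 */
	struct dma_window sema = dma_window(0x20000000ull, channels * 16);
	struct dma_window crtc = dma_window(0x30000000ull, 0x1000);

	printf("NvSema     window: %#llx..%#llx\n",
	       (unsigned long long)sema.start, (unsigned long long)sema.limit);
	printf("NvEvoSema0 window: %#llx..%#llx\n",
	       (unsigned long long)crtc.start, (unsigned long long)crtc.limit);
	return 0;
}
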
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index db26e050c73c..cc88f3649909 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -22,20 +22,16 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
+#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include <core/ramht.h>
#include "nouveau_fbcon.h"
-#include <core/mm.h>
int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
@@ -69,9 +65,8 @@ int
nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
int ret;
ret = RING_SPACE(chan, 12);
@@ -98,9 +93,8 @@ int
nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
+ struct nouveau_drm *drm = nouveau_newpriv(nfbdev->dev);
+ struct nouveau_channel *chan = drm->channel;
uint32_t width, dwords, *data = (uint32_t *)image->data;
uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
uint32_t *palette = info->pseudo_palette;
@@ -157,12 +151,14 @@ nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = dev_priv->channel;
struct nouveau_framebuffer *fb = &nfbdev->nouveau_fb;
+ struct nouveau_drm *drm = nouveau_newpriv(dev);
+ struct nouveau_channel *chan = drm->channel;
+ struct nouveau_object *object;
int ret, format;
- ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+ ret = nouveau_object_new(nv_object(chan->cli), NVDRM_CHAN, Nv2D,
+ 0x902d, NULL, 0, &object);
if (ret)
return ret;
@@ -202,9 +198,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
BEGIN_NVC0(chan, NvSub2D, 0x0000, 1);
OUT_RING (chan, 0x0000902d);
- BEGIN_NVC0(chan, NvSub2D, 0x0104, 2);
- OUT_RING (chan, upper_32_bits(chan->notifier_vma.offset));
- OUT_RING (chan, lower_32_bits(chan->notifier_vma.offset));
BEGIN_NVC0(chan, NvSub2D, 0x0290, 1);
OUT_RING (chan, 0);
BEGIN_NVC0(chan, NvSub2D, 0x0888, 1);
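
The fbcon hunks above keep the familiar RING_SPACE()/BEGIN_NVC0()/OUT_RING() pushbuf pattern while switching object allocation to nouveau_object_new(); the RING_SPACE() argument (7 or 11 dwords for fillrect, 12 for copyarea) still has to cover every header and data dword emitted afterwards. Below is a toy model of that accounting, with made-up method sizes purely for illustration; it is not the driver's actual macros.

#include <assert.h>
#include <stdio.h>

/* Toy pushbuf accounting: each BEGIN_*() emits one header dword plus
 * `count` data dwords, and the total must stay within what RING_SPACE()
 * reserved, otherwise the pushbuf would be overrun. */
struct ring { unsigned reserved, used; };

static void ring_space(struct ring *r, unsigned dwords)
{
	r->reserved = dwords;
	r->used = 0;
}

static void begin(struct ring *r, unsigned count)
{
	r->used += 1 + count;           /* header + payload */
	assert(r->used <= r->reserved); /* overflow check */
}

int main(void)
{
	struct ring r;
	int rop_copy = 1;

	/* mirrors RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11) above */
	ring_space(&r, rop_copy ? 7 : 11);
	begin(&r, 1);	/* hypothetical single-dword method */
	begin(&r, 4);	/* hypothetical four-dword method */
	printf("used %u of %u reserved dwords\n", r.used, r.reserved);
	return 0;
}
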
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 779c5ff4ed70..ce612ad398ad 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -22,13 +22,15 @@
* Authors: Ben Skeggs
*/
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include <core/object.h>
+#include <core/client.h>
+#include <core/class.h>
+
#include <engine/fifo.h>
-#include <core/ramht.h>
+
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
#include "nouveau_fence.h"
-#include "nv50_display.h"
struct nvc0_fence_priv {
struct nouveau_fence_priv base;
@@ -54,7 +56,8 @@ nvc0_fence_emit(struct nouveau_fence *fence)
{
struct nouveau_channel *chan = fence->channel;
struct nvc0_fence_chan *fctx = chan->fence;
- u64 addr = fctx->vma.offset + chan->id * 16;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ u64 addr = fctx->vma.offset + fifo->chid * 16;
int ret;
ret = RING_SPACE(chan, 5);
@@ -75,7 +78,8 @@ nvc0_fence_sync(struct nouveau_fence *fence,
struct nouveau_channel *prev, struct nouveau_channel *chan)
{
struct nvc0_fence_chan *fctx = chan->fence;
- u64 addr = fctx->vma.offset + prev->id * 16;
+ struct nouveau_fifo_chan *fifo = (void *)prev->object;
+ u64 addr = fctx->vma.offset + fifo->chid * 16;
int ret;
ret = RING_SPACE(chan, 5);
@@ -95,31 +99,29 @@ nvc0_fence_sync(struct nouveau_fence *fence,
static u32
nvc0_fence_read(struct nouveau_channel *chan)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
- return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nvc0_fence_priv *priv = chan->drm->fence;
+ return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
}
static void
nvc0_fence_context_del(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
+ struct drm_device *dev = chan->drm->dev;
+ struct nvc0_fence_priv *priv = chan->drm->fence;
struct nvc0_fence_chan *fctx = chan->fence;
int i;
- if (dev_priv->card_type >= NV_D0) {
+ if (nv_device(chan->drm->device)->card_type >= NV_D0) {
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ struct nouveau_bo *bo = nvd0sema(dev, i);
nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
} else
- if (dev_priv->card_type >= NV_50) {
- struct nv50_display *disp = nv50_display(dev);
+ if (nv_device(chan->drm->device)->card_type >= NV_50) {
for (i = 0; i < dev->mode_config.num_crtc; i++) {
- struct nv50_display_crtc *dispc = &disp->crtc[i];
- nouveau_bo_vma_del(dispc->sem.bo, &fctx->dispc_vma[i]);
+ struct nouveau_bo *bo = nv50sema(dev, i);
+ nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
}
}
@@ -132,9 +134,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
static int
nvc0_fence_context_new(struct nouveau_channel *chan)
{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fifo_chan *fifo = (void *)chan->object;
+ struct nouveau_client *client = nouveau_client(fifo);
+ struct nvc0_fence_priv *priv = chan->drm->fence;
struct nvc0_fence_chan *fctx;
int ret, i;
@@ -144,36 +146,35 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
nouveau_fence_context_new(&fctx->base);
- ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+ ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
if (ret)
nvc0_fence_context_del(chan);
/* map display semaphore buffers into channel's vm */
- for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
+ for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo;
- if (dev_priv->card_type >= NV_D0)
- bo = nvd0_display_crtc_sema(dev, i);
+ if (nv_device(chan->drm->device)->card_type >= NV_D0)
+ bo = nvd0sema(chan->drm->dev, i);
else
- bo = nv50_display(dev)->crtc[i].sem.bo;
+ bo = nv50sema(chan->drm->dev, i);
- ret = nouveau_bo_vma_add(bo, chan->vm, &fctx->dispc_vma[i]);
+ ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
}
- nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+ nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
return ret;
}
static bool
-nvc0_fence_suspend(struct drm_device *dev)
+nvc0_fence_suspend(struct nouveau_drm *drm)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nvc0_fence_priv *priv = drm->fence;
int i;
- priv->suspend = vmalloc(pfifo->channels * sizeof(u32));
+ priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
if (priv->suspend) {
- for (i = 0; i < pfifo->channels; i++)
+ for (i = 0; i <= pfifo->max; i++)
priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
}
@@ -181,15 +182,14 @@ nvc0_fence_suspend(struct drm_device *dev)
}
static void
-nvc0_fence_resume(struct drm_device *dev)
+nvc0_fence_resume(struct nouveau_drm *drm)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
+ struct nvc0_fence_priv *priv = drm->fence;
int i;
if (priv->suspend) {
- for (i = 0; i < pfifo->channels; i++)
+ for (i = 0; i <= pfifo->max; i++)
nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
vfree(priv->suspend);
priv->suspend = NULL;
@@ -197,26 +197,23 @@ nvc0_fence_resume(struct drm_device *dev)
}
static void
-nvc0_fence_destroy(struct drm_device *dev)
+nvc0_fence_destroy(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fence_priv *priv = dev_priv->fence.func;
-
+ struct nvc0_fence_priv *priv = drm->fence;
nouveau_bo_unmap(priv->bo);
nouveau_bo_ref(NULL, &priv->bo);
- dev_priv->fence.func = NULL;
+ drm->fence = NULL;
kfree(priv);
}
int
-nvc0_fence_create(struct drm_device *dev)
+nvc0_fence_create(struct nouveau_drm *drm)
{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
struct nvc0_fence_priv *priv;
int ret;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -228,10 +225,9 @@ nvc0_fence_create(struct drm_device *dev)
priv->base.emit = nvc0_fence_emit;
priv->base.sync = nvc0_fence_sync;
priv->base.read = nvc0_fence_read;
- dev_priv->fence.func = priv;
- ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
- 0, 0, NULL, &priv->bo);
+ ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
+ TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
if (ret == 0) {
ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
if (ret == 0)
@@ -241,6 +237,6 @@ nvc0_fence_create(struct drm_device *dev)
}
if (ret)
- nvc0_fence_destroy(dev);
+ nvc0_fence_destroy(drm);
return ret;
}
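
In the nvc0_fence.c suspend/resume paths above, pfifo->channels is replaced by pfifo->max + 1 and the loops now run with i <= pfifo->max, which only works because max is the highest valid channel id rather than a channel count. A small standalone sketch of the save/restore round trip; FIFO_MAX and the plain array standing in for priv->bo are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* pfifo->max is the highest valid channel id, so max + 1 entries are
 * needed; the patch changes both the vmalloc() size and the loop bounds. */
#define FIFO_MAX 127	/* assumption: channels 0..127 */

static uint32_t fence_bo[FIFO_MAX + 1];	/* stand-in for priv->bo, one u32 per channel */

int main(void)
{
	uint32_t *suspend = malloc((FIFO_MAX + 1) * sizeof(*suspend));
	int i;

	if (!suspend)
		return 1;

	for (i = 0; i <= FIFO_MAX; i++)	/* save, as in nvc0_fence_suspend() */
		suspend[i] = fence_bo[i];

	memset(fence_bo, 0, sizeof(fence_bo));	/* contents lost across suspend */

	for (i = 0; i <= FIFO_MAX; i++)	/* restore, as in nvc0_fence_resume() */
		fence_bo[i] = suspend[i];

	free(suspend);
	printf("saved and restored %d fence sequences\n", FIFO_MAX + 1);
	return 0;
}
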
diff --git a/drivers/gpu/drm/nouveau/nvc0_software.c b/drivers/gpu/drm/nouveau/nvc0_software.c
deleted file mode 100644
index eaaa5768f4f7..000000000000
--- a/drivers/gpu/drm/nouveau/nvc0_software.c
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include <core/ramht.h>
-#include "nouveau_software.h"
-
-#include "nv50_display.h"
-
-struct nvc0_software_priv {
- struct nouveau_software_priv base;
-};
-
-struct nvc0_software_chan {
- struct nouveau_software_chan base;
-};
-
-static int
-nvc0_software_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_software_chan *pch;
-
- pch = kzalloc(sizeof(*pch), GFP_KERNEL);
- if (!pch)
- return -ENOMEM;
-
- nouveau_software_context_new(chan, &pch->base);
- chan->engctx[engine] = pch;
- return 0;
-}
-
-static void
-nvc0_software_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_software_chan *pch = chan->engctx[engine];
- chan->engctx[engine] = NULL;
- kfree(pch);
-}
-
-static int
-nvc0_software_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- return 0;
-}
-
-static int
-nvc0_software_init(struct drm_device *dev, int engine)
-{
- return 0;
-}
-
-static int
-nvc0_software_fini(struct drm_device *dev, int engine, bool suspend)
-{
- return 0;
-}
-
-static void
-nvc0_software_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_software_priv *psw = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, SW);
- kfree(psw);
-}
-
-int
-nvc0_software_create(struct drm_device *dev)
-{
- struct nvc0_software_priv *psw = kzalloc(sizeof(*psw), GFP_KERNEL);
- if (!psw)
- return -ENOMEM;
-
- psw->base.base.destroy = nvc0_software_destroy;
- psw->base.base.init = nvc0_software_init;
- psw->base.base.fini = nvc0_software_fini;
- psw->base.base.context_new = nvc0_software_context_new;
- psw->base.base.context_del = nvc0_software_context_del;
- psw->base.base.object_new = nvc0_software_object_new;
- nouveau_software_create(&psw->base);
-
- NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
- NVOBJ_CLASS(dev, 0x906e, SW);
- return 0;
-}
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 2da4927b5e06..37b3f3f071d9 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -31,7 +31,6 @@
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include "nouveau_dma.h"
#include "nouveau_fb.h"
#include "nouveau_fence.h"
#include "nv50_display.h"
@@ -1830,15 +1829,7 @@ nvd0_display_intr(struct drm_device *dev)
intr &= ~0x00100000;
}
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- u32 mask = 0x01000000 << i;
- if (intr & mask) {
- u32 stat = nv_rd32(dev, 0x6100bc + (i * 0x800));
- nv_wr32(dev, 0x6100bc + (i * 0x800), stat);
- intr &= ~mask;
- }
- }
-
+ intr &= ~0x0f000000; /* vblank, handled in core */
if (intr)
NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
}
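
The nvd0_display_intr() hunk above drops the per-CRTC loop and clears 0x0f000000 in one go: the individual vblank bits are 0x01000000 << i, which for up to four CRTCs OR together to exactly that mask, and the events themselves are now handled by the core display code. A quick check of the mask arithmetic; the four-CRTC count is an assumption for illustration.

#include <stdio.h>

int main(void)
{
	unsigned mask = 0, i;

	/* assumption: up to four CRTCs, matching the 0x0f000000 blanket mask */
	for (i = 0; i < 4; i++)
		mask |= 0x01000000u << i;

	printf("per-crtc vblank bits OR'd together: 0x%08x\n", mask);
	return 0;	/* prints 0x0f000000 */
}
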