author | Ben Skeggs <bskeggs@redhat.com> | 2015-08-20 14:54:07 +1000
committer | Ben Skeggs <bskeggs@redhat.com> | 2015-08-28 12:40:11 +1000
commit | 6189f1b0938dc0621c27494031b83ffae566e318
tree | 5f85dff90808cbabf293eeef4d8e78be565eaa95
parent | a317aa21be51f869d5ab0198fad94a4570af9ddb
download | linux-stable-6189f1b0938dc0621c27494031b83ffae566e318.tar.gz linux-stable-6189f1b0938dc0621c27494031b83ffae566e318.tar.bz2 linux-stable-6189f1b0938dc0621c27494031b83ffae566e318.zip
drm/nouveau/fifo: cosmetic changes
This is purely preparation for upcoming commits; there should be no
functional changes here.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
25 files changed, 800 insertions(+), 788 deletions(-)
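The whole series is a mechanical rename: the embedded struct nvkm_engine member of struct nvkm_fifo changes from "base" to "engine", per-chipset "*_fifo_priv" structs lose the "_priv" suffix, and local variables pointing at the FIFO object change from "priv"/"pfifo" to "fifo". The self-contained C sketch below is not nouveau code; the toy struct and function names are invented purely for illustration. It shows the embedded-member pattern these renames operate on, and why the change is cosmetic: only the names of the member and the locals change, not the layout or the container_of-style downcast.

```c
/*
 * Minimal standalone sketch (not nouveau code) of the rename pattern this
 * commit applies.  All identifiers here are invented for illustration.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_engine {
	const char *name;
};

struct toy_fifo {
	struct toy_engine engine;	/* was the generic name "base" */
	int min, max;
};

/* container_of-style downcast from the embedded member to its wrapper */
#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void toy_fifo_report(struct toy_engine *eng)
{
	/* the local was previously called "priv"; renamed to "fifo" */
	struct toy_fifo *fifo = toy_container_of(eng, struct toy_fifo, engine);

	printf("%s: channels %d..%d\n", fifo->engine.name, fifo->min, fifo->max);
}

int main(void)
{
	struct toy_fifo fifo = {
		.engine = { .name = "PFIFO" },
		.min = 1,
		.max = 127,
	};

	toy_fifo_report(&fifo.engine);
	return 0;
}
```

Because the struct layout and the downcast are untouched, every hunk in the diff below is a pure s/priv/fifo/ and s/base/engine/ style substitution, which is why the insertion and deletion counts nearly match.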
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 9100b800562e..3e77c924434b 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -68,7 +68,7 @@ struct nvkm_fifo_base { #include <core/event.h> struct nvkm_fifo { - struct nvkm_engine base; + struct nvkm_engine engine; struct nvkm_event cevent; /* channel creation event */ struct nvkm_event uevent; /* async user trigger */ @@ -92,9 +92,9 @@ nvkm_fifo(void *obj) #define nvkm_fifo_create(o,e,c,fc,lc,d) \ nvkm_fifo_create_((o), (e), (c), (fc), (lc), sizeof(**d), (void **)d) #define nvkm_fifo_init(p) \ - nvkm_engine_init(&(p)->base) + nvkm_engine_init(&(p)->engine) #define nvkm_fifo_fini(p,s) \ - nvkm_engine_fini(&(p)->base, (s)) + nvkm_engine_fini(&(p)->engine, (s)) int nvkm_fifo_create_(struct nvkm_object *, struct nvkm_object *, struct nvkm_oclass *, int min, int max, diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c index a03db4368696..76098a58e2fa 100644 --- a/drivers/gpu/drm/nouveau/nv84_fence.c +++ b/drivers/gpu/drm/nouveau/nv84_fence.c @@ -213,7 +213,7 @@ nv84_fence_destroy(struct nouveau_drm *drm) int nv84_fence_create(struct nouveau_drm *drm) { - struct nvkm_fifo *pfifo = nvxx_fifo(&drm->device); + struct nvkm_fifo *fifo = nvxx_fifo(&drm->device); struct nv84_fence_priv *priv; u32 domain; int ret; @@ -228,7 +228,7 @@ nv84_fence_create(struct nouveau_drm *drm) priv->base.context_new = nv84_fence_context_new; priv->base.context_del = nv84_fence_context_del; - priv->base.contexts = pfifo->max + 1; + priv->base.contexts = fifo->max + 1; priv->base.context_base = fence_context_alloc(priv->base.contexts); priv->base.uevent = true; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c index 9addf43e07d4..a8fff0e13e19 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gt215.c @@ -72,7 +72,7 @@ gt215_ce_isr_error_name[] = { void gt215_ce_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_falcon *falcon = (void *)subdev; struct nvkm_object *engctx; @@ -87,7 +87,7 @@ gt215_ce_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & 0x00000040) { nv_error(falcon, "DISPATCH_ERROR ["); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c index c1f065d8efa4..442c2a002c63 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c @@ -104,7 +104,7 @@ g84_cipher_intr_mask[] = { static void g84_cipher_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct nvkm_engine *cipher = (void *)subdev; @@ -115,7 +115,7 @@ g84_cipher_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat) { nv_error(cipher, "%s", ""); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c index a85014bb37a7..2b0f497d57a4 
100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c @@ -58,7 +58,7 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent, u64 engmask, int len, void **ptr) { struct nvkm_device *device = nv_device(engine); - struct nvkm_fifo *priv = (void *)engine; + struct nvkm_fifo *fifo = (void *)engine; struct nvkm_fifo_chan *chan; struct nvkm_dmaeng *dmaeng; unsigned long flags; @@ -90,39 +90,39 @@ nvkm_fifo_channel_create_(struct nvkm_object *parent, return ret; /* find a free fifo channel */ - spin_lock_irqsave(&priv->lock, flags); - for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) { - if (!priv->channel[chan->chid]) { - priv->channel[chan->chid] = nv_object(chan); + spin_lock_irqsave(&fifo->lock, flags); + for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) { + if (!fifo->channel[chan->chid]) { + fifo->channel[chan->chid] = nv_object(chan); break; } } - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irqrestore(&fifo->lock, flags); - if (chan->chid == priv->max) { - nv_error(priv, "no free channels\n"); + if (chan->chid == fifo->max) { + nv_error(fifo, "no free channels\n"); return -ENOSPC; } chan->addr = nv_device_resource_start(device, bar) + addr + size * chan->chid; chan->size = size; - nvkm_event_send(&priv->cevent, 1, 0, NULL, 0); + nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0); return 0; } void nvkm_fifo_channel_destroy(struct nvkm_fifo_chan *chan) { - struct nvkm_fifo *priv = (void *)nv_object(chan)->engine; + struct nvkm_fifo *fifo = (void *)nv_object(chan)->engine; unsigned long flags; if (chan->user) iounmap(chan->user); - spin_lock_irqsave(&priv->lock, flags); - priv->channel[chan->chid] = NULL; - spin_unlock_irqrestore(&priv->lock, flags); + spin_lock_irqsave(&fifo->lock, flags); + fifo->channel[chan->chid] = NULL; + spin_unlock_irqrestore(&fifo->lock, flags); nvkm_gpuobj_ref(NULL, &chan->pushgpu); nvkm_object_ref(NULL, (struct nvkm_object **)&chan->pushdma); @@ -214,9 +214,9 @@ _nvkm_fifo_channel_ntfy(struct nvkm_object *object, u32 type, } static int -nvkm_fifo_chid(struct nvkm_fifo *priv, struct nvkm_object *object) +nvkm_fifo_chid(struct nvkm_fifo *fifo, struct nvkm_object *object) { - int engidx = nv_hclass(priv) & 0xff; + int engidx = nv_hclass(fifo) & 0xff; while (object && object->parent) { if ( nv_iclass(object->parent, NV_ENGCTX_CLASS) && @@ -243,12 +243,12 @@ nvkm_client_name_for_fifo_chid(struct nvkm_fifo *fifo, u32 chid) } void -nvkm_fifo_destroy(struct nvkm_fifo *priv) +nvkm_fifo_destroy(struct nvkm_fifo *fifo) { - kfree(priv->channel); - nvkm_event_fini(&priv->uevent); - nvkm_event_fini(&priv->cevent); - nvkm_engine_destroy(&priv->base); + kfree(fifo->channel); + nvkm_event_fini(&fifo->uevent); + nvkm_event_fini(&fifo->cevent); + nvkm_engine_destroy(&fifo->engine); } int @@ -256,26 +256,26 @@ nvkm_fifo_create_(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, int min, int max, int length, void **pobject) { - struct nvkm_fifo *priv; + struct nvkm_fifo *fifo; int ret; ret = nvkm_engine_create_(parent, engine, oclass, true, "PFIFO", "fifo", length, pobject); - priv = *pobject; + fifo = *pobject; if (ret) return ret; - priv->min = min; - priv->max = max; - priv->channel = kzalloc(sizeof(*priv->channel) * (max + 1), GFP_KERNEL); - if (!priv->channel) + fifo->min = min; + fifo->max = max; + fifo->channel = kzalloc(sizeof(*fifo->channel) * (max + 1), GFP_KERNEL); + if (!fifo->channel) return -ENOMEM; - ret = 
nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &priv->cevent); + ret = nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent); if (ret) return ret; - priv->chid = nvkm_fifo_chid; - spin_lock_init(&priv->lock); + fifo->chid = nvkm_fifo_chid; + spin_lock_init(&fifo->lock); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c index a04920b3cf84..bff5867e24ce 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/g84.c @@ -81,7 +81,7 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *object) { struct nvkm_bar *bar = nvkm_bar(parent); - struct nv50_fifo_priv *priv = (void *)parent->engine; + struct nv50_fifo *fifo = (void *)parent->engine; struct nv50_fifo_base *base = (void *)parent->parent; struct nv50_fifo_chan *chan = (void *)parent; u32 addr, save, engn; @@ -103,12 +103,12 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend, return -EINVAL; } - save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); - nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); - done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); - nv_wr32(priv, 0x002520, save); + save = nv_mask(fifo, 0x002520, 0x0000003f, 1 << engn); + nv_wr32(fifo, 0x0032fc, nv_gpuobj(base)->addr >> 12); + done = nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff); + nv_wr32(fifo, 0x002520, save); if (!done) { - nv_error(priv, "channel %d [%s] unload timeout\n", + nv_error(fifo, "channel %d [%s] unload timeout\n", chan->base.chid, nvkm_client_name(chan)); if (suspend) return -EBUSY; @@ -309,7 +309,7 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine, static int g84_fifo_chan_init(struct nvkm_object *object) { - struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo *fifo = (void *)object->engine; struct nv50_fifo_base *base = (void *)object->parent; struct nv50_fifo_chan *chan = (void *)object; struct nvkm_gpuobj *ramfc = base->ramfc; @@ -320,8 +320,8 @@ g84_fifo_chan_init(struct nvkm_object *object) if (ret) return ret; - nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8); - nv50_fifo_playlist_update(priv); + nv_wr32(fifo, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8); + nv50_fifo_playlist_update(fifo); return 0; } @@ -444,34 +444,34 @@ g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { - struct nv50_fifo_priv *priv; + struct nv50_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, - &priv->playlist[0]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0, + &fifo->playlist[0]); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, - &priv->playlist[1]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0, + &fifo->playlist[1]); if (ret) return ret; - ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &priv->base.uevent); + ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &fifo->base.uevent); if (ret) return ret; - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &g84_fifo_cclass; - nv_engine(priv)->sclass = g84_fifo_sclass; - 
priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &g84_fifo_cclass; + nv_engine(fifo)->sclass = g84_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c index b745252f2261..0a7971a3317c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c @@ -35,7 +35,7 @@ #include <nvif/class.h> #include <nvif/unpack.h> -struct gf100_fifo_priv { +struct gf100_fifo { struct nvkm_fifo base; struct work_struct fault; @@ -74,18 +74,18 @@ struct gf100_fifo_chan { ******************************************************************************/ static void -gf100_fifo_runlist_update(struct gf100_fifo_priv *priv) +gf100_fifo_runlist_update(struct gf100_fifo *fifo) { - struct nvkm_bar *bar = nvkm_bar(priv); + struct nvkm_bar *bar = nvkm_bar(fifo); struct nvkm_gpuobj *cur; int i, p; - mutex_lock(&nv_subdev(priv)->mutex); - cur = priv->runlist.mem[priv->runlist.active]; - priv->runlist.active = !priv->runlist.active; + mutex_lock(&nv_subdev(fifo)->mutex); + cur = fifo->runlist.mem[fifo->runlist.active]; + fifo->runlist.active = !fifo->runlist.active; for (i = 0, p = 0; i < 128; i++) { - struct gf100_fifo_chan *chan = (void *)priv->base.channel[i]; + struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i]; if (chan && chan->state == RUNNING) { nv_wo32(cur, p + 0, i); nv_wo32(cur, p + 4, 0x00000004); @@ -94,14 +94,14 @@ gf100_fifo_runlist_update(struct gf100_fifo_priv *priv) } bar->flush(bar); - nv_wr32(priv, 0x002270, cur->addr >> 12); - nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); + nv_wr32(fifo, 0x002270, cur->addr >> 12); + nv_wr32(fifo, 0x002274, 0x01f00000 | (p >> 3)); - if (wait_event_timeout(priv->runlist.wait, - !(nv_rd32(priv, 0x00227c) & 0x00100000), + if (wait_event_timeout(fifo->runlist.wait, + !(nv_rd32(fifo, 0x00227c) & 0x00100000), msecs_to_jiffies(2000)) == 0) - nv_error(priv, "runlist update timeout\n"); - mutex_unlock(&nv_subdev(priv)->mutex); + nv_error(fifo, "runlist update timeout\n"); + mutex_unlock(&nv_subdev(fifo)->mutex); } static int @@ -146,7 +146,7 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *object) { struct nvkm_bar *bar = nvkm_bar(parent); - struct gf100_fifo_priv *priv = (void *)parent->engine; + struct gf100_fifo *fifo = (void *)parent->engine; struct gf100_fifo_base *base = (void *)parent->parent; struct gf100_fifo_chan *chan = (void *)parent; u32 addr; @@ -163,9 +163,9 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend, return -EINVAL; } - nv_wr32(priv, 0x002634, chan->base.chid); - if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { - nv_error(priv, "channel %d [%s] kick timeout\n", + nv_wr32(fifo, 0x002634, chan->base.chid); + if (!nv_wait(fifo, 0x002634, 0xffffffff, chan->base.chid)) { + nv_error(fifo, "channel %d [%s] kick timeout\n", chan->base.chid, nvkm_client_name(chan)); if (suspend) return -EBUSY; @@ -186,7 +186,7 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nv50_channel_gpfifo_v0 v0; } *args = data; struct nvkm_bar *bar = nvkm_bar(parent); - struct gf100_fifo_priv *priv = (void *)engine; + struct gf100_fifo *fifo = (void *)engine; struct gf100_fifo_base *base = (void *)parent; struct gf100_fifo_chan 
*chan; u64 usermem, ioffset, ilength; @@ -202,7 +202,7 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, return ret; ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, - priv->user.bar.offset, 0x1000, + fifo->user.bar.offset, 0x1000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR) | @@ -225,10 +225,10 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ilength = order_base_2(args->v0.ilength / 8); for (i = 0; i < 0x1000; i += 4) - nv_wo32(priv->user.mem, usermem + i, 0x00000000); + nv_wo32(fifo->user.mem, usermem + i, 0x00000000); - nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem)); - nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); + nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); nv_wo32(base, 0x10, 0x0000face); nv_wo32(base, 0x30, 0xfffff902); nv_wo32(base, 0x48, lower_32_bits(ioffset)); @@ -251,7 +251,7 @@ static int gf100_fifo_chan_init(struct nvkm_object *object) { struct nvkm_gpuobj *base = nv_gpuobj(object->parent); - struct gf100_fifo_priv *priv = (void *)object->engine; + struct gf100_fifo *fifo = (void *)object->engine; struct gf100_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; int ret; @@ -260,33 +260,33 @@ gf100_fifo_chan_init(struct nvkm_object *object) if (ret) return ret; - nv_wr32(priv, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12); + nv_wr32(fifo, 0x003000 + (chid * 8), 0xc0000000 | base->addr >> 12); if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { - nv_wr32(priv, 0x003004 + (chid * 8), 0x001f0001); - gf100_fifo_runlist_update(priv); + nv_wr32(fifo, 0x003004 + (chid * 8), 0x001f0001); + gf100_fifo_runlist_update(fifo); } return 0; } -static void gf100_fifo_intr_engine(struct gf100_fifo_priv *priv); +static void gf100_fifo_intr_engine(struct gf100_fifo *fifo); static int gf100_fifo_chan_fini(struct nvkm_object *object, bool suspend) { - struct gf100_fifo_priv *priv = (void *)object->engine; + struct gf100_fifo *fifo = (void *)object->engine; struct gf100_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { - nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); - gf100_fifo_runlist_update(priv); + nv_mask(fifo, 0x003004 + (chid * 8), 0x00000001, 0x00000000); + gf100_fifo_runlist_update(fifo); } - gf100_fifo_intr_engine(priv); + gf100_fifo_intr_engine(fifo); - nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); + nv_wr32(fifo, 0x003000 + (chid * 8), 0x00000000); return nvkm_fifo_channel_fini(&chan->base, suspend); } @@ -371,7 +371,7 @@ gf100_fifo_cclass = { ******************************************************************************/ static inline int -gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn) +gf100_fifo_engidx(struct gf100_fifo *fifo, u32 engn) { switch (engn) { case NVDEV_ENGINE_GR : engn = 0; break; @@ -388,7 +388,7 @@ gf100_fifo_engidx(struct gf100_fifo_priv *priv, u32 engn) } static inline struct nvkm_engine * -gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn) +gf100_fifo_engine(struct gf100_fifo *fifo, u32 engn) { switch (engn) { case 0: engn = NVDEV_ENGINE_GR; break; @@ -401,69 +401,69 @@ gf100_fifo_engine(struct gf100_fifo_priv *priv, u32 engn) return NULL; } - return nvkm_engine(priv, engn); + return nvkm_engine(fifo, engn); } static void gf100_fifo_recover_work(struct work_struct *work) { - struct 
gf100_fifo_priv *priv = container_of(work, typeof(*priv), fault); + struct gf100_fifo *fifo = container_of(work, typeof(*fifo), fault); struct nvkm_object *engine; unsigned long flags; u32 engn, engm = 0; u64 mask, todo; - spin_lock_irqsave(&priv->base.lock, flags); - mask = priv->mask; - priv->mask = 0ULL; - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); + mask = fifo->mask; + fifo->mask = 0ULL; + spin_unlock_irqrestore(&fifo->base.lock, flags); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) - engm |= 1 << gf100_fifo_engidx(priv, engn); - nv_mask(priv, 0x002630, engm, engm); + engm |= 1 << gf100_fifo_engidx(fifo, engn); + nv_mask(fifo, 0x002630, engm, engm); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { - if ((engine = (void *)nvkm_engine(priv, engn))) { + if ((engine = (void *)nvkm_engine(fifo, engn))) { nv_ofuncs(engine)->fini(engine, false); WARN_ON(nv_ofuncs(engine)->init(engine)); } } - gf100_fifo_runlist_update(priv); - nv_wr32(priv, 0x00262c, engm); - nv_mask(priv, 0x002630, engm, 0x00000000); + gf100_fifo_runlist_update(fifo); + nv_wr32(fifo, 0x00262c, engm); + nv_mask(fifo, 0x002630, engm, 0x00000000); } static void -gf100_fifo_recover(struct gf100_fifo_priv *priv, struct nvkm_engine *engine, +gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine, struct gf100_fifo_chan *chan) { u32 chid = chan->base.chid; unsigned long flags; - nv_error(priv, "%s engine fault on channel %d, recovering...\n", + nv_error(fifo, "%s engine fault on channel %d, recovering...\n", nv_subdev(engine)->name, chid); - nv_mask(priv, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); + nv_mask(fifo, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000); chan->state = KILLED; - spin_lock_irqsave(&priv->base.lock, flags); - priv->mask |= 1ULL << nv_engidx(engine); - spin_unlock_irqrestore(&priv->base.lock, flags); - schedule_work(&priv->fault); + spin_lock_irqsave(&fifo->base.lock, flags); + fifo->mask |= 1ULL << nv_engidx(engine); + spin_unlock_irqrestore(&fifo->base.lock, flags); + schedule_work(&fifo->fault); } static int -gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data) +gf100_fifo_swmthd(struct gf100_fifo *fifo, u32 chid, u32 mthd, u32 data) { struct gf100_fifo_chan *chan = NULL; struct nvkm_handle *bind; unsigned long flags; int ret = -EINVAL; - spin_lock_irqsave(&priv->base.lock, flags); - if (likely(chid >= priv->base.min && chid <= priv->base.max)) - chan = (void *)priv->base.channel[chid]; + spin_lock_irqsave(&fifo->base.lock, flags); + if (likely(chid >= fifo->base.min && chid <= fifo->base.max)) + chan = (void *)fifo->base.channel[chid]; if (unlikely(!chan)) goto out; @@ -475,7 +475,7 @@ gf100_fifo_swmthd(struct gf100_fifo_priv *priv, u32 chid, u32 mthd, u32 data) } out: - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_unlock_irqrestore(&fifo->base.lock, flags); return ret; } @@ -486,14 +486,14 @@ gf100_fifo_sched_reason[] = { }; static void -gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv) +gf100_fifo_intr_sched_ctxsw(struct gf100_fifo *fifo) { struct nvkm_engine *engine; struct gf100_fifo_chan *chan; u32 engn; for (engn = 0; engn < 6; engn++) { - u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04)); + u32 stat = nv_rd32(fifo, 0x002640 + (engn * 0x04)); u32 busy = (stat & 0x80000000); u32 save = (stat & 0x00100000); /* maybe? 
*/ u32 unk0 = (stat & 0x00040000); @@ -502,19 +502,19 @@ gf100_fifo_intr_sched_ctxsw(struct gf100_fifo_priv *priv) (void)save; if (busy && unk0 && unk1) { - if (!(chan = (void *)priv->base.channel[chid])) + if (!(chan = (void *)fifo->base.channel[chid])) continue; - if (!(engine = gf100_fifo_engine(priv, engn))) + if (!(engine = gf100_fifo_engine(fifo, engn))) continue; - gf100_fifo_recover(priv, engine, chan); + gf100_fifo_recover(fifo, engine, chan); } } } static void -gf100_fifo_intr_sched(struct gf100_fifo_priv *priv) +gf100_fifo_intr_sched(struct gf100_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x00254c); + u32 intr = nv_rd32(fifo, 0x00254c); u32 code = intr & 0x000000ff; const struct nvkm_enum *en; char enunk[6] = ""; @@ -523,11 +523,11 @@ gf100_fifo_intr_sched(struct gf100_fifo_priv *priv) if (!en) snprintf(enunk, sizeof(enunk), "UNK%02x", code); - nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); + nv_error(fifo, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); switch (code) { case 0x0a: - gf100_fifo_intr_sched_ctxsw(priv); + gf100_fifo_intr_sched_ctxsw(fifo); break; default: break; @@ -594,12 +594,12 @@ gf100_fifo_fault_gpcclient[] = { }; static void -gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit) +gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit) { - u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); - u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); - u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); - u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); + u32 inst = nv_rd32(fifo, 0x002800 + (unit * 0x10)); + u32 valo = nv_rd32(fifo, 0x002804 + (unit * 0x10)); + u32 vahi = nv_rd32(fifo, 0x002808 + (unit * 0x10)); + u32 stat = nv_rd32(fifo, 0x00280c + (unit * 0x10)); u32 gpc = (stat & 0x1f000000) >> 24; u32 client = (stat & 0x00001f00) >> 8; u32 write = (stat & 0x00000080); @@ -621,16 +621,16 @@ gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit) if (eu) { switch (eu->data2) { case NVDEV_SUBDEV_BAR: - nv_mask(priv, 0x001704, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001704, 0x00000000, 0x00000000); break; case NVDEV_SUBDEV_INSTMEM: - nv_mask(priv, 0x001714, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001714, 0x00000000, 0x00000000); break; case NVDEV_ENGINE_IFB: - nv_mask(priv, 0x001718, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_engine(priv, eu->data2); + engine = nvkm_engine(fifo, eu->data2); if (engine) engctx = nvkm_engctx_get(engine, inst); break; @@ -649,7 +649,7 @@ gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit) if (!ec) snprintf(ecunk, sizeof(ecunk), "UNK%02x", client); - nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " + nv_error(fifo, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " "channel 0x%010llx [%s]\n", write ? "write" : "read", (u64)vahi << 32 | valo, er ? er->name : erunk, eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? 
"" : "/", @@ -660,7 +660,7 @@ gf100_fifo_intr_fault(struct gf100_fifo_priv *priv, int unit) while (object) { switch (nv_mclass(object)) { case FERMI_CHANNEL_GPFIFO: - gf100_fifo_recover(priv, engine, (void *)object); + gf100_fifo_recover(fifo, engine, (void *)object); break; } object = object->parent; @@ -678,82 +678,82 @@ gf100_fifo_pbdma_intr[] = { }; static void -gf100_fifo_intr_pbdma(struct gf100_fifo_priv *priv, int unit) +gf100_fifo_intr_pbdma(struct gf100_fifo *fifo, int unit) { - u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)); - u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); - u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); - u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0x7f; + u32 stat = nv_rd32(fifo, 0x040108 + (unit * 0x2000)); + u32 addr = nv_rd32(fifo, 0x0400c0 + (unit * 0x2000)); + u32 data = nv_rd32(fifo, 0x0400c4 + (unit * 0x2000)); + u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0x7f; u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00003ffc); u32 show = stat; if (stat & 0x00800000) { - if (!gf100_fifo_swmthd(priv, chid, mthd, data)) + if (!gf100_fifo_swmthd(fifo, chid, mthd, data)) show &= ~0x00800000; } if (show) { - nv_error(priv, "PBDMA%d:", unit); + nv_error(fifo, "PBDMA%d:", unit); nvkm_bitfield_print(gf100_fifo_pbdma_intr, show); pr_cont("\n"); - nv_error(priv, + nv_error(fifo, "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", unit, chid, - nvkm_client_name_for_fifo_chid(&priv->base, chid), + nvkm_client_name_for_fifo_chid(&fifo->base, chid), subc, mthd, data); } - nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); - nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); + nv_wr32(fifo, 0x0400c0 + (unit * 0x2000), 0x80600008); + nv_wr32(fifo, 0x040108 + (unit * 0x2000), stat); } static void -gf100_fifo_intr_runlist(struct gf100_fifo_priv *priv) +gf100_fifo_intr_runlist(struct gf100_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x002a00); + u32 intr = nv_rd32(fifo, 0x002a00); if (intr & 0x10000000) { - wake_up(&priv->runlist.wait); - nv_wr32(priv, 0x002a00, 0x10000000); + wake_up(&fifo->runlist.wait); + nv_wr32(fifo, 0x002a00, 0x10000000); intr &= ~0x10000000; } if (intr) { - nv_error(priv, "RUNLIST 0x%08x\n", intr); - nv_wr32(priv, 0x002a00, intr); + nv_error(fifo, "RUNLIST 0x%08x\n", intr); + nv_wr32(fifo, 0x002a00, intr); } } static void -gf100_fifo_intr_engine_unit(struct gf100_fifo_priv *priv, int engn) +gf100_fifo_intr_engine_unit(struct gf100_fifo *fifo, int engn) { - u32 intr = nv_rd32(priv, 0x0025a8 + (engn * 0x04)); - u32 inte = nv_rd32(priv, 0x002628); + u32 intr = nv_rd32(fifo, 0x0025a8 + (engn * 0x04)); + u32 inte = nv_rd32(fifo, 0x002628); u32 unkn; - nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr); + nv_wr32(fifo, 0x0025a8 + (engn * 0x04), intr); for (unkn = 0; unkn < 8; unkn++) { u32 ints = (intr >> (unkn * 0x04)) & inte; if (ints & 0x1) { - nvkm_fifo_uevent(&priv->base); + nvkm_fifo_uevent(&fifo->base); ints &= ~1; } if (ints) { - nv_error(priv, "ENGINE %d %d %01x", engn, unkn, ints); - nv_mask(priv, 0x002628, ints, 0); + nv_error(fifo, "ENGINE %d %d %01x", engn, unkn, ints); + nv_mask(fifo, 0x002628, ints, 0); } } } static void -gf100_fifo_intr_engine(struct gf100_fifo_priv *priv) +gf100_fifo_intr_engine(struct gf100_fifo *fifo) { - u32 mask = nv_rd32(priv, 0x0025a4); + u32 mask = nv_rd32(fifo, 0x0025a4); while (mask) { u32 unit = __ffs(mask); - gf100_fifo_intr_engine_unit(priv, unit); + gf100_fifo_intr_engine_unit(fifo, unit); mask &= ~(1 << unit); } } @@ -761,73 +761,73 @@ gf100_fifo_intr_engine(struct 
gf100_fifo_priv *priv) static void gf100_fifo_intr(struct nvkm_subdev *subdev) { - struct gf100_fifo_priv *priv = (void *)subdev; - u32 mask = nv_rd32(priv, 0x002140); - u32 stat = nv_rd32(priv, 0x002100) & mask; + struct gf100_fifo *fifo = (void *)subdev; + u32 mask = nv_rd32(fifo, 0x002140); + u32 stat = nv_rd32(fifo, 0x002100) & mask; if (stat & 0x00000001) { - u32 intr = nv_rd32(priv, 0x00252c); - nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr); - nv_wr32(priv, 0x002100, 0x00000001); + u32 intr = nv_rd32(fifo, 0x00252c); + nv_warn(fifo, "INTR 0x00000001: 0x%08x\n", intr); + nv_wr32(fifo, 0x002100, 0x00000001); stat &= ~0x00000001; } if (stat & 0x00000100) { - gf100_fifo_intr_sched(priv); - nv_wr32(priv, 0x002100, 0x00000100); + gf100_fifo_intr_sched(fifo); + nv_wr32(fifo, 0x002100, 0x00000100); stat &= ~0x00000100; } if (stat & 0x00010000) { - u32 intr = nv_rd32(priv, 0x00256c); - nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr); - nv_wr32(priv, 0x002100, 0x00010000); + u32 intr = nv_rd32(fifo, 0x00256c); + nv_warn(fifo, "INTR 0x00010000: 0x%08x\n", intr); + nv_wr32(fifo, 0x002100, 0x00010000); stat &= ~0x00010000; } if (stat & 0x01000000) { - u32 intr = nv_rd32(priv, 0x00258c); - nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr); - nv_wr32(priv, 0x002100, 0x01000000); + u32 intr = nv_rd32(fifo, 0x00258c); + nv_warn(fifo, "INTR 0x01000000: 0x%08x\n", intr); + nv_wr32(fifo, 0x002100, 0x01000000); stat &= ~0x01000000; } if (stat & 0x10000000) { - u32 mask = nv_rd32(priv, 0x00259c); + u32 mask = nv_rd32(fifo, 0x00259c); while (mask) { u32 unit = __ffs(mask); - gf100_fifo_intr_fault(priv, unit); - nv_wr32(priv, 0x00259c, (1 << unit)); + gf100_fifo_intr_fault(fifo, unit); + nv_wr32(fifo, 0x00259c, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x10000000; } if (stat & 0x20000000) { - u32 mask = nv_rd32(priv, 0x0025a0); + u32 mask = nv_rd32(fifo, 0x0025a0); while (mask) { u32 unit = __ffs(mask); - gf100_fifo_intr_pbdma(priv, unit); - nv_wr32(priv, 0x0025a0, (1 << unit)); + gf100_fifo_intr_pbdma(fifo, unit); + nv_wr32(fifo, 0x0025a0, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x20000000; } if (stat & 0x40000000) { - gf100_fifo_intr_runlist(priv); + gf100_fifo_intr_runlist(fifo); stat &= ~0x40000000; } if (stat & 0x80000000) { - gf100_fifo_intr_engine(priv); + gf100_fifo_intr_engine(fifo); stat &= ~0x80000000; } if (stat) { - nv_error(priv, "INTR 0x%08x\n", stat); - nv_mask(priv, 0x002140, stat, 0x00000000); - nv_wr32(priv, 0x002100, stat); + nv_error(fifo, "INTR 0x%08x\n", stat); + nv_mask(fifo, 0x002140, stat, 0x00000000); + nv_wr32(fifo, 0x002100, stat); } } @@ -857,101 +857,101 @@ gf100_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { - struct gf100_fifo_priv *priv; + struct gf100_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 0, 127, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - INIT_WORK(&priv->fault, gf100_fifo_recover_work); + INIT_WORK(&fifo->fault, gf100_fifo_recover_work); - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, - &priv->runlist.mem[0]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0, + &fifo->runlist.mem[0]); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0x1000, 0, - &priv->runlist.mem[1]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x1000, 0x1000, 0, + 
&fifo->runlist.mem[1]); if (ret) return ret; - init_waitqueue_head(&priv->runlist.wait); + init_waitqueue_head(&fifo->runlist.wait); - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 0x1000, 0x1000, 0, - &priv->user.mem); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 0x1000, 0x1000, 0, + &fifo->user.mem); if (ret) return ret; - ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, - &priv->user.bar); + ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW, + &fifo->user.bar); if (ret) return ret; - ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &priv->base.uevent); + ret = nvkm_event_init(&gf100_fifo_uevent_func, 1, 1, &fifo->base.uevent); if (ret) return ret; - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = gf100_fifo_intr; - nv_engine(priv)->cclass = &gf100_fifo_cclass; - nv_engine(priv)->sclass = gf100_fifo_sclass; + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = gf100_fifo_intr; + nv_engine(fifo)->cclass = &gf100_fifo_cclass; + nv_engine(fifo)->sclass = gf100_fifo_sclass; return 0; } static void gf100_fifo_dtor(struct nvkm_object *object) { - struct gf100_fifo_priv *priv = (void *)object; + struct gf100_fifo *fifo = (void *)object; - nvkm_gpuobj_unmap(&priv->user.bar); - nvkm_gpuobj_ref(NULL, &priv->user.mem); - nvkm_gpuobj_ref(NULL, &priv->runlist.mem[0]); - nvkm_gpuobj_ref(NULL, &priv->runlist.mem[1]); + nvkm_gpuobj_unmap(&fifo->user.bar); + nvkm_gpuobj_ref(NULL, &fifo->user.mem); + nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[0]); + nvkm_gpuobj_ref(NULL, &fifo->runlist.mem[1]); - nvkm_fifo_destroy(&priv->base); + nvkm_fifo_destroy(&fifo->base); } static int gf100_fifo_init(struct nvkm_object *object) { - struct gf100_fifo_priv *priv = (void *)object; + struct gf100_fifo *fifo = (void *)object; int ret, i; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; - nv_wr32(priv, 0x000204, 0xffffffff); - nv_wr32(priv, 0x002204, 0xffffffff); + nv_wr32(fifo, 0x000204, 0xffffffff); + nv_wr32(fifo, 0x002204, 0xffffffff); - priv->spoon_nr = hweight32(nv_rd32(priv, 0x002204)); - nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); + fifo->spoon_nr = hweight32(nv_rd32(fifo, 0x002204)); + nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr); /* assign engines to PBDMAs */ - if (priv->spoon_nr >= 3) { - nv_wr32(priv, 0x002208, ~(1 << 0)); /* PGRAPH */ - nv_wr32(priv, 0x00220c, ~(1 << 1)); /* PVP */ - nv_wr32(priv, 0x002210, ~(1 << 1)); /* PMSPP */ - nv_wr32(priv, 0x002214, ~(1 << 1)); /* PMSVLD */ - nv_wr32(priv, 0x002218, ~(1 << 2)); /* PCE0 */ - nv_wr32(priv, 0x00221c, ~(1 << 1)); /* PCE1 */ + if (fifo->spoon_nr >= 3) { + nv_wr32(fifo, 0x002208, ~(1 << 0)); /* PGRAPH */ + nv_wr32(fifo, 0x00220c, ~(1 << 1)); /* PVP */ + nv_wr32(fifo, 0x002210, ~(1 << 1)); /* PMSPP */ + nv_wr32(fifo, 0x002214, ~(1 << 1)); /* PMSVLD */ + nv_wr32(fifo, 0x002218, ~(1 << 2)); /* PCE0 */ + nv_wr32(fifo, 0x00221c, ~(1 << 1)); /* PCE1 */ } /* PBDMA[n] */ - for (i = 0; i < priv->spoon_nr; i++) { - nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); - nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ - nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ + for (i = 0; i < fifo->spoon_nr; i++) { + nv_mask(fifo, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nv_wr32(fifo, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(fifo, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ } - nv_mask(priv, 0x002200, 0x00000001, 0x00000001); - nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset 
>> 12); + nv_mask(fifo, 0x002200, 0x00000001, 0x00000001); + nv_wr32(fifo, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); - nv_wr32(priv, 0x002100, 0xffffffff); - nv_wr32(priv, 0x002140, 0x7fffffff); - nv_wr32(priv, 0x002628, 0x00000001); /* ENGINE_INTR_EN */ + nv_wr32(fifo, 0x002100, 0xffffffff); + nv_wr32(fifo, 0x002140, 0x7fffffff); + nv_wr32(fifo, 0x002628, 0x00000001); /* ENGINE_INTR_EN */ return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index e10f9644140f..ed8d3820a044 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -58,7 +58,7 @@ struct gk104_fifo_engn { wait_queue_head_t wait; }; -struct gk104_fifo_priv { +struct gk104_fifo { struct nvkm_fifo base; struct work_struct fault; @@ -93,19 +93,19 @@ struct gk104_fifo_chan { ******************************************************************************/ static void -gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine) +gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine) { - struct nvkm_bar *bar = nvkm_bar(priv); - struct gk104_fifo_engn *engn = &priv->engine[engine]; + struct nvkm_bar *bar = nvkm_bar(fifo); + struct gk104_fifo_engn *engn = &fifo->engine[engine]; struct nvkm_gpuobj *cur; int i, p; - mutex_lock(&nv_subdev(priv)->mutex); + mutex_lock(&nv_subdev(fifo)->mutex); cur = engn->runlist[engn->cur_runlist]; engn->cur_runlist = !engn->cur_runlist; - for (i = 0, p = 0; i < priv->base.max; i++) { - struct gk104_fifo_chan *chan = (void *)priv->base.channel[i]; + for (i = 0, p = 0; i < fifo->base.max; i++) { + struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i]; if (chan && chan->state == RUNNING && chan->engine == engine) { nv_wo32(cur, p + 0, i); nv_wo32(cur, p + 4, 0x00000000); @@ -114,14 +114,14 @@ gk104_fifo_runlist_update(struct gk104_fifo_priv *priv, u32 engine) } bar->flush(bar); - nv_wr32(priv, 0x002270, cur->addr >> 12); - nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); + nv_wr32(fifo, 0x002270, cur->addr >> 12); + nv_wr32(fifo, 0x002274, (engine << 20) | (p >> 3)); - if (wait_event_timeout(engn->wait, !(nv_rd32(priv, 0x002284 + + if (wait_event_timeout(engn->wait, !(nv_rd32(fifo, 0x002284 + (engine * 0x08)) & 0x00100000), msecs_to_jiffies(2000)) == 0) - nv_error(priv, "runlist %d update timeout\n", engine); - mutex_unlock(&nv_subdev(priv)->mutex); + nv_error(fifo, "runlist %d update timeout\n", engine); + mutex_unlock(&nv_subdev(fifo)->mutex); } static int @@ -166,14 +166,30 @@ gk104_fifo_context_attach(struct nvkm_object *parent, } static int +gk104_fifo_chan_kick(struct gk104_fifo_chan *chan) +{ + struct nvkm_object *obj = (void *)chan; + struct gk104_fifo *fifo = (void *)obj->engine; + + nv_wr32(fifo, 0x002634, chan->base.chid); + if (!nv_wait(fifo, 0x002634, 0x100000, 0x000000)) { + nv_error(fifo, "channel %d [%s] kick timeout\n", + chan->base.chid, nvkm_client_name(chan)); + return -EBUSY; + } + + return 0; +} + +static int gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *object) { struct nvkm_bar *bar = nvkm_bar(parent); - struct gk104_fifo_priv *priv = (void *)parent->engine; struct gk104_fifo_base *base = (void *)parent->parent; struct gk104_fifo_chan *chan = (void *)parent; u32 addr; + int ret; switch (nv_engidx(object->engine)) { case NVDEV_ENGINE_SW : return 0; @@ -188,13 +204,9 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend, return -EINVAL; } - nv_wr32(priv, 
0x002634, chan->base.chid); - if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { - nv_error(priv, "channel %d [%s] kick timeout\n", - chan->base.chid, nvkm_client_name(chan)); - if (suspend) - return -EBUSY; - } + ret = gk104_fifo_chan_kick(chan); + if (ret && suspend) + return ret; if (addr) { nv_wo32(base, addr + 0x00, 0x00000000); @@ -214,7 +226,7 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct kepler_channel_gpfifo_a_v0 v0; } *args = data; struct nvkm_bar *bar = nvkm_bar(parent); - struct gk104_fifo_priv *priv = (void *)engine; + struct gk104_fifo *fifo = (void *)engine; struct gk104_fifo_base *base = (void *)parent; struct gk104_fifo_chan *chan; u64 usermem, ioffset, ilength; @@ -239,12 +251,12 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, } if (i == FIFO_ENGINE_NR) { - nv_error(priv, "unsupported engines 0x%08x\n", args->v0.engine); + nv_error(fifo, "unsupported engines 0x%08x\n", args->v0.engine); return -ENODEV; } ret = nvkm_fifo_channel_create(parent, engine, oclass, 1, - priv->user.bar.offset, 0x200, + fifo->user.bar.offset, 0x200, args->v0.pushbuf, fifo_engine[i].mask, &chan); *pobject = nv_object(chan); @@ -262,10 +274,10 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, ilength = order_base_2(args->v0.ilength / 8); for (i = 0; i < 0x200; i += 4) - nv_wo32(priv->user.mem, usermem + i, 0x00000000); + nv_wo32(fifo->user.mem, usermem + i, 0x00000000); - nv_wo32(base, 0x08, lower_32_bits(priv->user.mem->addr + usermem)); - nv_wo32(base, 0x0c, upper_32_bits(priv->user.mem->addr + usermem)); + nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem)); + nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem)); nv_wo32(base, 0x10, 0x0000face); nv_wo32(base, 0x30, 0xfffff902); nv_wo32(base, 0x48, lower_32_bits(ioffset)); @@ -286,7 +298,7 @@ static int gk104_fifo_chan_init(struct nvkm_object *object) { struct nvkm_gpuobj *base = nv_gpuobj(object->parent); - struct gk104_fifo_priv *priv = (void *)object->engine; + struct gk104_fifo *fifo = (void *)object->engine; struct gk104_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; int ret; @@ -295,13 +307,13 @@ gk104_fifo_chan_init(struct nvkm_object *object) if (ret) return ret; - nv_mask(priv, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); - nv_wr32(priv, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); + nv_mask(fifo, 0x800004 + (chid * 8), 0x000f0000, chan->engine << 16); + nv_wr32(fifo, 0x800000 + (chid * 8), 0x80000000 | base->addr >> 12); if (chan->state == STOPPED && (chan->state = RUNNING) == RUNNING) { - nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); - gk104_fifo_runlist_update(priv, chan->engine); - nv_mask(priv, 0x800004 + (chid * 8), 0x00000400, 0x00000400); + nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400); + gk104_fifo_runlist_update(fifo, chan->engine); + nv_mask(fifo, 0x800004 + (chid * 8), 0x00000400, 0x00000400); } return 0; @@ -310,16 +322,16 @@ gk104_fifo_chan_init(struct nvkm_object *object) static int gk104_fifo_chan_fini(struct nvkm_object *object, bool suspend) { - struct gk104_fifo_priv *priv = (void *)object->engine; + struct gk104_fifo *fifo = (void *)object->engine; struct gk104_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; if (chan->state == RUNNING && (chan->state = STOPPED) == STOPPED) { - nv_mask(priv, 0x800004 + (chid * 8), 0x00000800, 0x00000800); - gk104_fifo_runlist_update(priv, chan->engine); + 
nv_mask(fifo, 0x800004 + (chid * 8), 0x00000800, 0x00000800); + gk104_fifo_runlist_update(fifo, chan->engine); } - nv_wr32(priv, 0x800000 + (chid * 8), 0x00000000); + nv_wr32(fifo, 0x800000 + (chid * 8), 0x00000000); return nvkm_fifo_channel_fini(&chan->base, suspend); } @@ -403,7 +415,7 @@ gk104_fifo_cclass = { ******************************************************************************/ static inline int -gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn) +gk104_fifo_engidx(struct gk104_fifo *fifo, u32 engn) { switch (engn) { case NVDEV_ENGINE_GR : @@ -422,73 +434,73 @@ gk104_fifo_engidx(struct gk104_fifo_priv *priv, u32 engn) } static inline struct nvkm_engine * -gk104_fifo_engine(struct gk104_fifo_priv *priv, u32 engn) +gk104_fifo_engine(struct gk104_fifo *fifo, u32 engn) { if (engn >= ARRAY_SIZE(fifo_engine)) return NULL; - return nvkm_engine(priv, fifo_engine[engn].subdev); + return nvkm_engine(fifo, fifo_engine[engn].subdev); } static void gk104_fifo_recover_work(struct work_struct *work) { - struct gk104_fifo_priv *priv = container_of(work, typeof(*priv), fault); + struct gk104_fifo *fifo = container_of(work, typeof(*fifo), fault); struct nvkm_object *engine; unsigned long flags; u32 engn, engm = 0; u64 mask, todo; - spin_lock_irqsave(&priv->base.lock, flags); - mask = priv->mask; - priv->mask = 0ULL; - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); + mask = fifo->mask; + fifo->mask = 0ULL; + spin_unlock_irqrestore(&fifo->base.lock, flags); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) - engm |= 1 << gk104_fifo_engidx(priv, engn); - nv_mask(priv, 0x002630, engm, engm); + engm |= 1 << gk104_fifo_engidx(fifo, engn); + nv_mask(fifo, 0x002630, engm, engm); for (todo = mask; engn = __ffs64(todo), todo; todo &= ~(1 << engn)) { - if ((engine = (void *)nvkm_engine(priv, engn))) { + if ((engine = (void *)nvkm_engine(fifo, engn))) { nv_ofuncs(engine)->fini(engine, false); WARN_ON(nv_ofuncs(engine)->init(engine)); } - gk104_fifo_runlist_update(priv, gk104_fifo_engidx(priv, engn)); + gk104_fifo_runlist_update(fifo, gk104_fifo_engidx(fifo, engn)); } - nv_wr32(priv, 0x00262c, engm); - nv_mask(priv, 0x002630, engm, 0x00000000); + nv_wr32(fifo, 0x00262c, engm); + nv_mask(fifo, 0x002630, engm, 0x00000000); } static void -gk104_fifo_recover(struct gk104_fifo_priv *priv, struct nvkm_engine *engine, +gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine, struct gk104_fifo_chan *chan) { u32 chid = chan->base.chid; unsigned long flags; - nv_error(priv, "%s engine fault on channel %d, recovering...\n", + nv_error(fifo, "%s engine fault on channel %d, recovering...\n", nv_subdev(engine)->name, chid); - nv_mask(priv, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); + nv_mask(fifo, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800); chan->state = KILLED; - spin_lock_irqsave(&priv->base.lock, flags); - priv->mask |= 1ULL << nv_engidx(engine); - spin_unlock_irqrestore(&priv->base.lock, flags); - schedule_work(&priv->fault); + spin_lock_irqsave(&fifo->base.lock, flags); + fifo->mask |= 1ULL << nv_engidx(engine); + spin_unlock_irqrestore(&fifo->base.lock, flags); + schedule_work(&fifo->fault); } static int -gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data) +gk104_fifo_swmthd(struct gk104_fifo *fifo, u32 chid, u32 mthd, u32 data) { struct gk104_fifo_chan *chan = NULL; struct nvkm_handle *bind; unsigned long flags; int ret = -EINVAL; - spin_lock_irqsave(&priv->base.lock, flags); - if 
(likely(chid >= priv->base.min && chid <= priv->base.max)) - chan = (void *)priv->base.channel[chid]; + spin_lock_irqsave(&fifo->base.lock, flags); + if (likely(chid >= fifo->base.min && chid <= fifo->base.max)) + chan = (void *)fifo->base.channel[chid]; if (unlikely(!chan)) goto out; @@ -500,7 +512,7 @@ gk104_fifo_swmthd(struct gk104_fifo_priv *priv, u32 chid, u32 mthd, u32 data) } out: - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_unlock_irqrestore(&fifo->base.lock, flags); return ret; } @@ -516,9 +528,9 @@ gk104_fifo_bind_reason[] = { }; static void -gk104_fifo_intr_bind(struct gk104_fifo_priv *priv) +gk104_fifo_intr_bind(struct gk104_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x00252c); + u32 intr = nv_rd32(fifo, 0x00252c); u32 code = intr & 0x000000ff; const struct nvkm_enum *en; char enunk[6] = ""; @@ -527,7 +539,7 @@ gk104_fifo_intr_bind(struct gk104_fifo_priv *priv) if (!en) snprintf(enunk, sizeof(enunk), "UNK%02x", code); - nv_error(priv, "BIND_ERROR [ %s ]\n", en ? en->name : enunk); + nv_error(fifo, "BIND_ERROR [ %s ]\n", en ? en->name : enunk); } static const struct nvkm_enum @@ -537,14 +549,14 @@ gk104_fifo_sched_reason[] = { }; static void -gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv) +gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo) { struct nvkm_engine *engine; struct gk104_fifo_chan *chan; u32 engn; for (engn = 0; engn < ARRAY_SIZE(fifo_engine); engn++) { - u32 stat = nv_rd32(priv, 0x002640 + (engn * 0x04)); + u32 stat = nv_rd32(fifo, 0x002640 + (engn * 0x04)); u32 busy = (stat & 0x80000000); u32 next = (stat & 0x07ff0000) >> 16; u32 chsw = (stat & 0x00008000); @@ -555,19 +567,19 @@ gk104_fifo_intr_sched_ctxsw(struct gk104_fifo_priv *priv) (void)save; if (busy && chsw) { - if (!(chan = (void *)priv->base.channel[chid])) + if (!(chan = (void *)fifo->base.channel[chid])) continue; - if (!(engine = gk104_fifo_engine(priv, engn))) + if (!(engine = gk104_fifo_engine(fifo, engn))) continue; - gk104_fifo_recover(priv, engine, chan); + gk104_fifo_recover(fifo, engine, chan); } } } static void -gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) +gk104_fifo_intr_sched(struct gk104_fifo *fifo) { - u32 intr = nv_rd32(priv, 0x00254c); + u32 intr = nv_rd32(fifo, 0x00254c); u32 code = intr & 0x000000ff; const struct nvkm_enum *en; char enunk[6] = ""; @@ -576,11 +588,11 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) if (!en) snprintf(enunk, sizeof(enunk), "UNK%02x", code); - nv_error(priv, "SCHED_ERROR [ %s ]\n", en ? en->name : enunk); + nv_error(fifo, "SCHED_ERROR [ %s ]\n", en ? 
en->name : enunk); switch (code) { case 0x0a: - gk104_fifo_intr_sched_ctxsw(priv); + gk104_fifo_intr_sched_ctxsw(fifo); break; default: break; @@ -588,18 +600,18 @@ gk104_fifo_intr_sched(struct gk104_fifo_priv *priv) } static void -gk104_fifo_intr_chsw(struct gk104_fifo_priv *priv) +gk104_fifo_intr_chsw(struct gk104_fifo *fifo) { - u32 stat = nv_rd32(priv, 0x00256c); - nv_error(priv, "CHSW_ERROR 0x%08x\n", stat); - nv_wr32(priv, 0x00256c, stat); + u32 stat = nv_rd32(fifo, 0x00256c); + nv_error(fifo, "CHSW_ERROR 0x%08x\n", stat); + nv_wr32(fifo, 0x00256c, stat); } static void -gk104_fifo_intr_dropped_fault(struct gk104_fifo_priv *priv) +gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo) { - u32 stat = nv_rd32(priv, 0x00259c); - nv_error(priv, "DROPPED_MMU_FAULT 0x%08x\n", stat); + u32 stat = nv_rd32(fifo, 0x00259c); + nv_error(fifo, "DROPPED_MMU_FAULT 0x%08x\n", stat); } static const struct nvkm_enum @@ -708,12 +720,12 @@ gk104_fifo_fault_gpcclient[] = { }; static void -gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) { - u32 inst = nv_rd32(priv, 0x002800 + (unit * 0x10)); - u32 valo = nv_rd32(priv, 0x002804 + (unit * 0x10)); - u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); - u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); + u32 inst = nv_rd32(fifo, 0x002800 + (unit * 0x10)); + u32 valo = nv_rd32(fifo, 0x002804 + (unit * 0x10)); + u32 vahi = nv_rd32(fifo, 0x002808 + (unit * 0x10)); + u32 stat = nv_rd32(fifo, 0x00280c + (unit * 0x10)); u32 gpc = (stat & 0x1f000000) >> 24; u32 client = (stat & 0x00001f00) >> 8; u32 write = (stat & 0x00000080); @@ -735,16 +747,16 @@ gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) if (eu) { switch (eu->data2) { case NVDEV_SUBDEV_BAR: - nv_mask(priv, 0x001704, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001704, 0x00000000, 0x00000000); break; case NVDEV_SUBDEV_INSTMEM: - nv_mask(priv, 0x001714, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001714, 0x00000000, 0x00000000); break; case NVDEV_ENGINE_IFB: - nv_mask(priv, 0x001718, 0x00000000, 0x00000000); + nv_mask(fifo, 0x001718, 0x00000000, 0x00000000); break; default: - engine = nvkm_engine(priv, eu->data2); + engine = nvkm_engine(fifo, eu->data2); if (engine) engctx = nvkm_engctx_get(engine, inst); break; @@ -763,7 +775,7 @@ gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) if (!ec) snprintf(ecunk, sizeof(ecunk), "UNK%02x", client); - nv_error(priv, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " + nv_error(fifo, "%s fault at 0x%010llx [%s] from %s/%s%s%s%s on " "channel 0x%010llx [%s]\n", write ? "write" : "read", (u64)vahi << 32 | valo, er ? er->name : erunk, eu ? eu->name : euunk, hub ? "" : "GPC", gpcid, hub ? 
"" : "/", @@ -775,7 +787,7 @@ gk104_fifo_intr_fault(struct gk104_fifo_priv *priv, int unit) switch (nv_mclass(object)) { case KEPLER_CHANNEL_GPFIFO_A: case MAXWELL_CHANNEL_GPFIFO_A: - gk104_fifo_recover(priv, engine, (void *)object); + gk104_fifo_recover(fifo, engine, (void *)object); break; } object = object->parent; @@ -819,35 +831,35 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = { }; static void -gk104_fifo_intr_pbdma_0(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit) { - u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000)); - u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask; - u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000)); - u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000)); - u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; + u32 mask = nv_rd32(fifo, 0x04010c + (unit * 0x2000)); + u32 stat = nv_rd32(fifo, 0x040108 + (unit * 0x2000)) & mask; + u32 addr = nv_rd32(fifo, 0x0400c0 + (unit * 0x2000)); + u32 data = nv_rd32(fifo, 0x0400c4 + (unit * 0x2000)); + u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff; u32 subc = (addr & 0x00070000) >> 16; u32 mthd = (addr & 0x00003ffc); u32 show = stat; if (stat & 0x00800000) { - if (!gk104_fifo_swmthd(priv, chid, mthd, data)) + if (!gk104_fifo_swmthd(fifo, chid, mthd, data)) show &= ~0x00800000; - nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); + nv_wr32(fifo, 0x0400c0 + (unit * 0x2000), 0x80600008); } if (show) { - nv_error(priv, "PBDMA%d:", unit); + nv_error(fifo, "PBDMA%d:", unit); nvkm_bitfield_print(gk104_fifo_pbdma_intr_0, show); pr_cont("\n"); - nv_error(priv, + nv_error(fifo, "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", unit, chid, - nvkm_client_name_for_fifo_chid(&priv->base, chid), + nvkm_client_name_for_fifo_chid(&fifo->base, chid), subc, mthd, data); } - nv_wr32(priv, 0x040108 + (unit * 0x2000), stat); + nv_wr32(fifo, 0x040108 + (unit * 0x2000), stat); } static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { @@ -860,129 +872,129 @@ static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = { }; static void -gk104_fifo_intr_pbdma_1(struct gk104_fifo_priv *priv, int unit) +gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit) { - u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000)); - u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask; - u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff; + u32 mask = nv_rd32(fifo, 0x04014c + (unit * 0x2000)); + u32 stat = nv_rd32(fifo, 0x040148 + (unit * 0x2000)) & mask; + u32 chid = nv_rd32(fifo, 0x040120 + (unit * 0x2000)) & 0xfff; if (stat) { - nv_error(priv, "PBDMA%d:", unit); + nv_error(fifo, "PBDMA%d:", unit); nvkm_bitfield_print(gk104_fifo_pbdma_intr_1, stat); pr_cont("\n"); - nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid, - nv_rd32(priv, 0x040150 + (unit * 0x2000)), - nv_rd32(priv, 0x040154 + (unit * 0x2000))); + nv_error(fifo, "PBDMA%d: ch %d %08x %08x\n", unit, chid, + nv_rd32(fifo, 0x040150 + (unit * 0x2000)), + nv_rd32(fifo, 0x040154 + (unit * 0x2000))); } - nv_wr32(priv, 0x040148 + (unit * 0x2000), stat); + nv_wr32(fifo, 0x040148 + (unit * 0x2000), stat); } static void -gk104_fifo_intr_runlist(struct gk104_fifo_priv *priv) +gk104_fifo_intr_runlist(struct gk104_fifo *fifo) { - u32 mask = nv_rd32(priv, 0x002a00); + u32 mask = nv_rd32(fifo, 0x002a00); while (mask) { u32 engn = __ffs(mask); - wake_up(&priv->engine[engn].wait); - nv_wr32(priv, 0x002a00, 1 << engn); + wake_up(&fifo->engine[engn].wait); + nv_wr32(fifo, 
0x002a00, 1 << engn); mask &= ~(1 << engn); } } static void -gk104_fifo_intr_engine(struct gk104_fifo_priv *priv) +gk104_fifo_intr_engine(struct gk104_fifo *fifo) { - nvkm_fifo_uevent(&priv->base); + nvkm_fifo_uevent(&fifo->base); } static void gk104_fifo_intr(struct nvkm_subdev *subdev) { - struct gk104_fifo_priv *priv = (void *)subdev; - u32 mask = nv_rd32(priv, 0x002140); - u32 stat = nv_rd32(priv, 0x002100) & mask; + struct gk104_fifo *fifo = (void *)subdev; + u32 mask = nv_rd32(fifo, 0x002140); + u32 stat = nv_rd32(fifo, 0x002100) & mask; if (stat & 0x00000001) { - gk104_fifo_intr_bind(priv); - nv_wr32(priv, 0x002100, 0x00000001); + gk104_fifo_intr_bind(fifo); + nv_wr32(fifo, 0x002100, 0x00000001); stat &= ~0x00000001; } if (stat & 0x00000010) { - nv_error(priv, "PIO_ERROR\n"); - nv_wr32(priv, 0x002100, 0x00000010); + nv_error(fifo, "PIO_ERROR\n"); + nv_wr32(fifo, 0x002100, 0x00000010); stat &= ~0x00000010; } if (stat & 0x00000100) { - gk104_fifo_intr_sched(priv); - nv_wr32(priv, 0x002100, 0x00000100); + gk104_fifo_intr_sched(fifo); + nv_wr32(fifo, 0x002100, 0x00000100); stat &= ~0x00000100; } if (stat & 0x00010000) { - gk104_fifo_intr_chsw(priv); - nv_wr32(priv, 0x002100, 0x00010000); + gk104_fifo_intr_chsw(fifo); + nv_wr32(fifo, 0x002100, 0x00010000); stat &= ~0x00010000; } if (stat & 0x00800000) { - nv_error(priv, "FB_FLUSH_TIMEOUT\n"); - nv_wr32(priv, 0x002100, 0x00800000); + nv_error(fifo, "FB_FLUSH_TIMEOUT\n"); + nv_wr32(fifo, 0x002100, 0x00800000); stat &= ~0x00800000; } if (stat & 0x01000000) { - nv_error(priv, "LB_ERROR\n"); - nv_wr32(priv, 0x002100, 0x01000000); + nv_error(fifo, "LB_ERROR\n"); + nv_wr32(fifo, 0x002100, 0x01000000); stat &= ~0x01000000; } if (stat & 0x08000000) { - gk104_fifo_intr_dropped_fault(priv); - nv_wr32(priv, 0x002100, 0x08000000); + gk104_fifo_intr_dropped_fault(fifo); + nv_wr32(fifo, 0x002100, 0x08000000); stat &= ~0x08000000; } if (stat & 0x10000000) { - u32 mask = nv_rd32(priv, 0x00259c); + u32 mask = nv_rd32(fifo, 0x00259c); while (mask) { u32 unit = __ffs(mask); - gk104_fifo_intr_fault(priv, unit); - nv_wr32(priv, 0x00259c, (1 << unit)); + gk104_fifo_intr_fault(fifo, unit); + nv_wr32(fifo, 0x00259c, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x10000000; } if (stat & 0x20000000) { - u32 mask = nv_rd32(priv, 0x0025a0); + u32 mask = nv_rd32(fifo, 0x0025a0); while (mask) { u32 unit = __ffs(mask); - gk104_fifo_intr_pbdma_0(priv, unit); - gk104_fifo_intr_pbdma_1(priv, unit); - nv_wr32(priv, 0x0025a0, (1 << unit)); + gk104_fifo_intr_pbdma_0(fifo, unit); + gk104_fifo_intr_pbdma_1(fifo, unit); + nv_wr32(fifo, 0x0025a0, (1 << unit)); mask &= ~(1 << unit); } stat &= ~0x20000000; } if (stat & 0x40000000) { - gk104_fifo_intr_runlist(priv); + gk104_fifo_intr_runlist(fifo); stat &= ~0x40000000; } if (stat & 0x80000000) { - nv_wr32(priv, 0x002100, 0x80000000); - gk104_fifo_intr_engine(priv); + nv_wr32(fifo, 0x002100, 0x80000000); + gk104_fifo_intr_engine(fifo); stat &= ~0x80000000; } if (stat) { - nv_error(priv, "INTR 0x%08x\n", stat); - nv_mask(priv, 0x002140, stat, 0x00000000); - nv_wr32(priv, 0x002100, stat); + nv_error(fifo, "INTR 0x%08x\n", stat); + nv_mask(fifo, 0x002140, stat, 0x00000000); + nv_wr32(fifo, 0x002100, stat); } } @@ -1010,68 +1022,68 @@ gk104_fifo_uevent_func = { int gk104_fifo_fini(struct nvkm_object *object, bool suspend) { - struct gk104_fifo_priv *priv = (void *)object; + struct gk104_fifo *fifo = (void *)object; int ret; - ret = nvkm_fifo_fini(&priv->base, suspend); + ret = nvkm_fifo_fini(&fifo->base, suspend); if (ret) return ret; 
/* allow mmu fault interrupts, even when we're not using fifo */ - nv_mask(priv, 0x002140, 0x10000000, 0x10000000); + nv_mask(fifo, 0x002140, 0x10000000, 0x10000000); return 0; } int gk104_fifo_init(struct nvkm_object *object) { - struct gk104_fifo_priv *priv = (void *)object; + struct gk104_fifo *fifo = (void *)object; int ret, i; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; /* enable all available PBDMA units */ - nv_wr32(priv, 0x000204, 0xffffffff); - priv->spoon_nr = hweight32(nv_rd32(priv, 0x000204)); - nv_debug(priv, "%d PBDMA unit(s)\n", priv->spoon_nr); + nv_wr32(fifo, 0x000204, 0xffffffff); + fifo->spoon_nr = hweight32(nv_rd32(fifo, 0x000204)); + nv_debug(fifo, "%d PBDMA unit(s)\n", fifo->spoon_nr); /* PBDMA[n] */ - for (i = 0; i < priv->spoon_nr; i++) { - nv_mask(priv, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); - nv_wr32(priv, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ - nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ + for (i = 0; i < fifo->spoon_nr; i++) { + nv_mask(fifo, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000); + nv_wr32(fifo, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(fifo, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */ } /* PBDMA[n].HCE */ - for (i = 0; i < priv->spoon_nr; i++) { - nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ - nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ + for (i = 0; i < fifo->spoon_nr; i++) { + nv_wr32(fifo, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */ + nv_wr32(fifo, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */ } - nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12); + nv_wr32(fifo, 0x002254, 0x10000000 | fifo->user.bar.offset >> 12); - nv_wr32(priv, 0x002100, 0xffffffff); - nv_wr32(priv, 0x002140, 0x7fffffff); + nv_wr32(fifo, 0x002100, 0xffffffff); + nv_wr32(fifo, 0x002140, 0x7fffffff); return 0; } void gk104_fifo_dtor(struct nvkm_object *object) { - struct gk104_fifo_priv *priv = (void *)object; + struct gk104_fifo *fifo = (void *)object; int i; - nvkm_gpuobj_unmap(&priv->user.bar); - nvkm_gpuobj_ref(NULL, &priv->user.mem); + nvkm_gpuobj_unmap(&fifo->user.bar); + nvkm_gpuobj_ref(NULL, &fifo->user.mem); for (i = 0; i < FIFO_ENGINE_NR; i++) { - nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[1]); - nvkm_gpuobj_ref(NULL, &priv->engine[i].runlist[0]); + nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[1]); + nvkm_gpuobj_ref(NULL, &fifo->engine[i].runlist[0]); } - nvkm_fifo_destroy(&priv->base); + nvkm_fifo_destroy(&fifo->base); } int @@ -1080,49 +1092,49 @@ gk104_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_object **pobject) { struct gk104_fifo_impl *impl = (void *)oclass; - struct gk104_fifo_priv *priv; + struct gk104_fifo *fifo; int ret, i; ret = nvkm_fifo_create(parent, engine, oclass, 0, - impl->channels - 1, &priv); - *pobject = nv_object(priv); + impl->channels - 1, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - INIT_WORK(&priv->fault, gk104_fifo_recover_work); + INIT_WORK(&fifo->fault, gk104_fifo_recover_work); for (i = 0; i < FIFO_ENGINE_NR; i++) { - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, - 0, &priv->engine[i].runlist[0]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000, + 0, &fifo->engine[i].runlist[0]); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, - 0, &priv->engine[i].runlist[1]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 0x8000, 0x1000, + 0, 
&fifo->engine[i].runlist[1]); if (ret) return ret; - init_waitqueue_head(&priv->engine[i].wait); + init_waitqueue_head(&fifo->engine[i].wait); } - ret = nvkm_gpuobj_new(nv_object(priv), NULL, impl->channels * 0x200, - 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, impl->channels * 0x200, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC, &fifo->user.mem); if (ret) return ret; - ret = nvkm_gpuobj_map(priv->user.mem, NV_MEM_ACCESS_RW, - &priv->user.bar); + ret = nvkm_gpuobj_map(fifo->user.mem, NV_MEM_ACCESS_RW, + &fifo->user.bar); if (ret) return ret; - ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &priv->base.uevent); + ret = nvkm_event_init(&gk104_fifo_uevent_func, 1, 1, &fifo->base.uevent); if (ret) return ret; - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = gk104_fifo_intr; - nv_engine(priv)->cclass = &gk104_fifo_cclass; - nv_engine(priv)->sclass = gk104_fifo_sclass; + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = gk104_fifo_intr; + nv_engine(fifo)->cclass = &gk104_fifo_cclass; + nv_engine(fifo)->sclass = gk104_fifo_sclass; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c index 7596587b0e7c..6a93b911e8a8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm204.c @@ -38,8 +38,8 @@ gm204_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, { int ret = gk104_fifo_ctor(parent, engine, oclass, data, size, pobject); if (ret == 0) { - struct gk104_fifo_priv *priv = (void *)*pobject; - nv_engine(priv)->sclass = gm204_fifo_sclass; + struct gk104_fifo *fifo = (void *)*pobject; + nv_engine(fifo)->sclass = gm204_fifo_sclass; } return ret; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index bdf635f9ab5f..91a2080bdaf9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -54,7 +54,7 @@ int nv04_fifo_object_attach(struct nvkm_object *parent, struct nvkm_object *object, u32 handle) { - struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = (void *)parent; u32 context, chid = chan->base.chid; int ret; @@ -82,19 +82,19 @@ nv04_fifo_object_attach(struct nvkm_object *parent, context |= 0x80000000; /* valid */ context |= chid << 24; - mutex_lock(&nv_subdev(priv)->mutex); - ret = nvkm_ramht_insert(priv->ramht, chid, handle, context); - mutex_unlock(&nv_subdev(priv)->mutex); + mutex_lock(&nv_subdev(fifo)->mutex); + ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context); + mutex_unlock(&nv_subdev(fifo)->mutex); return ret; } void nv04_fifo_object_detach(struct nvkm_object *parent, int cookie) { - struct nv04_fifo_priv *priv = (void *)parent->engine; - mutex_lock(&nv_subdev(priv)->mutex); - nvkm_ramht_remove(priv->ramht, cookie); - mutex_unlock(&nv_subdev(priv)->mutex); + struct nv04_fifo *fifo = (void *)parent->engine; + mutex_lock(&nv_subdev(fifo)->mutex); + nvkm_ramht_remove(fifo->ramht, cookie); + mutex_unlock(&nv_subdev(fifo)->mutex); } int @@ -114,7 +114,7 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, union { struct nv03_channel_dma_v0 v0; } *args = data; - struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo_chan *chan; int ret; @@ -142,10 +142,10 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, 
nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 32; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); - nv_wo32(priv->ramfc, chan->ramfc + 0x10, + nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4); + nv_wo32(fifo->ramfc, chan->ramfc + 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | #ifdef __BIG_ENDIAN @@ -158,12 +158,12 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent, void nv04_fifo_chan_dtor(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = (void *)object; - struct ramfc_desc *c = priv->ramfc_desc; + struct ramfc_desc *c = fifo->ramfc_desc; do { - nv_wo32(priv->ramfc, chan->ramfc + c->ctxp, 0x00000000); + nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000); } while ((++c)->bits); nvkm_fifo_channel_destroy(&chan->base); @@ -172,7 +172,7 @@ nv04_fifo_chan_dtor(struct nvkm_object *object) int nv04_fifo_chan_init(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = (void *)object; u32 mask = 1 << chan->base.chid; unsigned long flags; @@ -182,59 +182,59 @@ nv04_fifo_chan_init(struct nvkm_object *object) if (ret) return ret; - spin_lock_irqsave(&priv->base.lock, flags); - nv_mask(priv, NV04_PFIFO_MODE, mask, mask); - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); + nv_mask(fifo, NV04_PFIFO_MODE, mask, mask); + spin_unlock_irqrestore(&fifo->base.lock, flags); return 0; } int nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend) { - struct nv04_fifo_priv *priv = (void *)object->engine; + struct nv04_fifo *fifo = (void *)object->engine; struct nv04_fifo_chan *chan = (void *)object; - struct nvkm_gpuobj *fctx = priv->ramfc; + struct nvkm_gpuobj *fctx = fifo->ramfc; struct ramfc_desc *c; unsigned long flags; u32 data = chan->ramfc; u32 chid; /* prevent fifo context switches */ - spin_lock_irqsave(&priv->base.lock, flags); - nv_wr32(priv, NV03_PFIFO_CACHES, 0); + spin_lock_irqsave(&fifo->base.lock, flags); + nv_wr32(fifo, NV03_PFIFO_CACHES, 0); /* if this channel is active, replace it with a null context */ - chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + chid = nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; if (chid == chan->base.chid) { - nv_mask(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 0); - nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); + nv_mask(fifo, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 0); + nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0); - c = priv->ramfc_desc; + c = fifo->ramfc_desc; do { u32 rm = ((1ULL << c->bits) - 1) << c->regs; u32 cm = ((1ULL << c->bits) - 1) << c->ctxs; - u32 rv = (nv_rd32(priv, c->regp) & rm) >> c->regs; + u32 rv = (nv_rd32(fifo, c->regp) & rm) >> c->regs; u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm); nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs)); } while ((++c)->bits); - c = priv->ramfc_desc; + c = fifo->ramfc_desc; do { - nv_wr32(priv, c->regp, 0x00000000); + 
nv_wr32(fifo, c->regp, 0x00000000); } while ((++c)->bits); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, 0); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUT, 0); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, 0); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUT, 0); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); } /* restore normal operation, after disabling dma mode */ - nv_mask(priv, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); - nv_wr32(priv, NV03_PFIFO_CACHES, 1); - spin_unlock_irqrestore(&priv->base.lock, flags); + nv_mask(fifo, NV04_PFIFO_MODE, 1 << chan->base.chid, 0); + nv_wr32(fifo, NV03_PFIFO_CACHES, 1); + spin_unlock_irqrestore(&fifo->base.lock, flags); return nvkm_fifo_channel_fini(&chan->base, suspend); } @@ -297,17 +297,17 @@ nv04_fifo_cclass = { ******************************************************************************/ void -nv04_fifo_pause(struct nvkm_fifo *pfifo, unsigned long *pflags) -__acquires(priv->base.lock) +nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags) +__acquires(fifo->base.lock) { - struct nv04_fifo_priv *priv = (void *)pfifo; + struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base); unsigned long flags; - spin_lock_irqsave(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); *pflags = flags; - nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000000); - nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); + nv_wr32(fifo, NV03_PFIFO_CACHES, 0x00000000); + nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000); /* in some cases the puller may be left in an inconsistent state * if you try to stop it while it's busy translating handles. @@ -318,28 +318,28 @@ __acquires(priv->base.lock) * to avoid this, we invalidate the most recently calculated * instance. 
*/ - if (!nv_wait(priv, NV04_PFIFO_CACHE1_PULL0, + if (!nv_wait(fifo, NV04_PFIFO_CACHE1_PULL0, NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0x00000000)) - nv_warn(priv, "timeout idling puller\n"); + nv_warn(fifo, "timeout idling puller\n"); - if (nv_rd32(priv, NV04_PFIFO_CACHE1_PULL0) & + if (nv_rd32(fifo, NV04_PFIFO_CACHE1_PULL0) & NV04_PFIFO_CACHE1_PULL0_HASH_FAILED) - nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); + nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); - nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0x00000000); + nv_wr32(fifo, NV04_PFIFO_CACHE1_HASH, 0x00000000); } void -nv04_fifo_start(struct nvkm_fifo *pfifo, unsigned long *pflags) -__releases(priv->base.lock) +nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags) +__releases(fifo->base.lock) { - struct nv04_fifo_priv *priv = (void *)pfifo; + struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base); unsigned long flags = *pflags; - nv_mask(priv, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); - nv_wr32(priv, NV03_PFIFO_CACHES, 0x00000001); + nv_mask(fifo, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001); + nv_wr32(fifo, NV03_PFIFO_CACHES, 0x00000001); - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_unlock_irqrestore(&fifo->base.lock, flags); } static const char * @@ -353,7 +353,7 @@ nv_dma_state_err(u32 state) } static bool -nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) +nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data) { struct nv04_fifo_chan *chan = NULL; struct nvkm_handle *bind; @@ -363,9 +363,9 @@ nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) unsigned long flags; u32 engine; - spin_lock_irqsave(&priv->base.lock, flags); - if (likely(chid >= priv->base.min && chid <= priv->base.max)) - chan = (void *)priv->base.channel[chid]; + spin_lock_irqsave(&fifo->base.lock, flags); + if (likely(chid >= fifo->base.min && chid <= fifo->base.max)) + chan = (void *)fifo->base.channel[chid]; if (unlikely(!chan)) goto out; @@ -380,13 +380,13 @@ nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) chan->subc[subc] = data; handled = true; - nv_mask(priv, NV04_PFIFO_CACHE1_ENGINE, engine, 0); + nv_mask(fifo, NV04_PFIFO_CACHE1_ENGINE, engine, 0); } nvkm_namedb_put(bind); break; default: - engine = nv_rd32(priv, NV04_PFIFO_CACHE1_ENGINE); + engine = nv_rd32(fifo, NV04_PFIFO_CACHE1_ENGINE); if (unlikely(((engine >> (subc * 4)) & 0xf) != 0)) break; @@ -400,13 +400,13 @@ nv04_fifo_swmthd(struct nv04_fifo_priv *priv, u32 chid, u32 addr, u32 data) } out: - spin_unlock_irqrestore(&priv->base.lock, flags); + spin_unlock_irqrestore(&fifo->base.lock, flags); return handled; } static void nv04_fifo_cache_error(struct nvkm_device *device, - struct nv04_fifo_priv *priv, u32 chid, u32 get) + struct nv04_fifo *fifo, u32 chid, u32 get) { u32 mthd, data; int ptr; @@ -419,139 +419,139 @@ nv04_fifo_cache_error(struct nvkm_device *device, ptr = (get & 0x7ff) >> 2; if (device->card_type < NV_40) { - mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr)); + mthd = nv_rd32(fifo, NV04_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(fifo, NV04_PFIFO_CACHE1_DATA(ptr)); } else { - mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr)); - data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr)); + mthd = nv_rd32(fifo, NV40_PFIFO_CACHE1_METHOD(ptr)); + data = nv_rd32(fifo, NV40_PFIFO_CACHE1_DATA(ptr)); } - if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { + if (!nv04_fifo_swmthd(fifo, chid, mthd, 
data)) { const char *client_name = - nvkm_client_name_for_fifo_chid(&priv->base, chid); - nv_error(priv, + nvkm_client_name_for_fifo_chid(&fifo->base, chid); + nv_error(fifo, "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n", chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc, data); } - nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0); - nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); + nv_wr32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH, 0); + nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, - nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH0) & ~1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, + nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH0) | 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_HASH, 0); - nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, - nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH, + nv_rd32(fifo, NV04_PFIFO_CACHE1_DMA_PUSH) | 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); } static void nv04_fifo_dma_pusher(struct nvkm_device *device, - struct nv04_fifo_priv *priv, u32 chid) + struct nv04_fifo *fifo, u32 chid) { const char *client_name; - u32 dma_get = nv_rd32(priv, 0x003244); - u32 dma_put = nv_rd32(priv, 0x003240); - u32 push = nv_rd32(priv, 0x003220); - u32 state = nv_rd32(priv, 0x003228); + u32 dma_get = nv_rd32(fifo, 0x003244); + u32 dma_put = nv_rd32(fifo, 0x003240); + u32 push = nv_rd32(fifo, 0x003220); + u32 state = nv_rd32(fifo, 0x003228); - client_name = nvkm_client_name_for_fifo_chid(&priv->base, chid); + client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid); if (device->card_type == NV_50) { - u32 ho_get = nv_rd32(priv, 0x003328); - u32 ho_put = nv_rd32(priv, 0x003320); - u32 ib_get = nv_rd32(priv, 0x003334); - u32 ib_put = nv_rd32(priv, 0x003330); + u32 ho_get = nv_rd32(fifo, 0x003328); + u32 ho_put = nv_rd32(fifo, 0x003320); + u32 ib_get = nv_rd32(fifo, 0x003334); + u32 ib_put = nv_rd32(fifo, 0x003330); - nv_error(priv, + nv_error(fifo, "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", chid, client_name, ho_get, dma_get, ho_put, dma_put, ib_get, ib_put, state, nv_dma_state_err(state), push); /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ - nv_wr32(priv, 0x003364, 0x00000000); + nv_wr32(fifo, 0x003364, 0x00000000); if (dma_get != dma_put || ho_get != ho_put) { - nv_wr32(priv, 0x003244, dma_put); - nv_wr32(priv, 0x003328, ho_put); + nv_wr32(fifo, 0x003244, dma_put); + nv_wr32(fifo, 0x003328, ho_put); } else if (ib_get != ib_put) - nv_wr32(priv, 0x003334, ib_put); + nv_wr32(fifo, 0x003334, ib_put); } else { - nv_error(priv, + nv_error(fifo, "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n", chid, client_name, dma_get, dma_put, state, nv_dma_state_err(state), push); if (dma_get != dma_put) - nv_wr32(priv, 0x003244, dma_put); + nv_wr32(fifo, 0x003244, dma_put); } - nv_wr32(priv, 0x003228, 0x00000000); - nv_wr32(priv, 0x003220, 0x00000001); - nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER); + nv_wr32(fifo, 0x003228, 0x00000000); + nv_wr32(fifo, 0x003220, 0x00000001); + nv_wr32(fifo, 0x002100, 
NV_PFIFO_INTR_DMA_PUSHER); } void nv04_fifo_intr(struct nvkm_subdev *subdev) { struct nvkm_device *device = nv_device(subdev); - struct nv04_fifo_priv *priv = (void *)subdev; - u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); - u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; + struct nv04_fifo *fifo = (void *)subdev; + u32 mask = nv_rd32(fifo, NV03_PFIFO_INTR_EN_0); + u32 stat = nv_rd32(fifo, NV03_PFIFO_INTR_0) & mask; u32 reassign, chid, get, sem; - reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; - nv_wr32(priv, NV03_PFIFO_CACHES, 0); + reassign = nv_rd32(fifo, NV03_PFIFO_CACHES) & 1; + nv_wr32(fifo, NV03_PFIFO_CACHES, 0); - chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; - get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); + chid = nv_rd32(fifo, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max; + get = nv_rd32(fifo, NV03_PFIFO_CACHE1_GET); if (stat & NV_PFIFO_INTR_CACHE_ERROR) { - nv04_fifo_cache_error(device, priv, chid, get); + nv04_fifo_cache_error(device, fifo, chid, get); stat &= ~NV_PFIFO_INTR_CACHE_ERROR; } if (stat & NV_PFIFO_INTR_DMA_PUSHER) { - nv04_fifo_dma_pusher(device, priv, chid); + nv04_fifo_dma_pusher(device, fifo, chid); stat &= ~NV_PFIFO_INTR_DMA_PUSHER; } if (stat & NV_PFIFO_INTR_SEMAPHORE) { stat &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); + nv_wr32(fifo, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); - sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + sem = nv_rd32(fifo, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(fifo, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); } if (device->card_type == NV_50) { if (stat & 0x00000010) { stat &= ~0x00000010; - nv_wr32(priv, 0x002100, 0x00000010); + nv_wr32(fifo, 0x002100, 0x00000010); } if (stat & 0x40000000) { - nv_wr32(priv, 0x002100, 0x40000000); - nvkm_fifo_uevent(&priv->base); + nv_wr32(fifo, 0x002100, 0x40000000); + nvkm_fifo_uevent(&fifo->base); stat &= ~0x40000000; } } if (stat) { - nv_warn(priv, "unknown intr 0x%08x\n", stat); - nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); - nv_wr32(priv, NV03_PFIFO_INTR_0, stat); + nv_warn(fifo, "unknown intr 0x%08x\n", stat); + nv_mask(fifo, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); + nv_wr32(fifo, NV03_PFIFO_INTR_0, stat); } - nv_wr32(priv, NV03_PFIFO_CACHES, reassign); + nv_wr32(fifo, NV03_PFIFO_CACHES, reassign); } static int @@ -560,65 +560,65 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_object **pobject) { struct nv04_instmem *imem = nv04_instmem(parent); - struct nv04_fifo_priv *priv; + struct nv04_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - nvkm_ramht_ref(imem->ramht, &priv->ramht); - nvkm_gpuobj_ref(imem->ramro, &priv->ramro); - nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); - - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &nv04_fifo_cclass; - nv_engine(priv)->sclass = nv04_fifo_sclass; - priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; - priv->ramfc_desc = nv04_ramfc; + nvkm_ramht_ref(imem->ramht, &fifo->ramht); + nvkm_gpuobj_ref(imem->ramro, &fifo->ramro); + 
nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc); + + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &nv04_fifo_cclass; + nv_engine(fifo)->sclass = nv04_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; + fifo->ramfc_desc = nv04_ramfc; return 0; } void nv04_fifo_dtor(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object; - nvkm_gpuobj_ref(NULL, &priv->ramfc); - nvkm_gpuobj_ref(NULL, &priv->ramro); - nvkm_ramht_ref(NULL, &priv->ramht); - nvkm_fifo_destroy(&priv->base); + struct nv04_fifo *fifo = (void *)object; + nvkm_gpuobj_ref(NULL, &fifo->ramfc); + nvkm_gpuobj_ref(NULL, &fifo->ramro); + nvkm_ramht_ref(NULL, &fifo->ramht); + nvkm_fifo_destroy(&fifo->base); } int nv04_fifo_init(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object; + struct nv04_fifo *fifo = (void *)object; int ret; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; - nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff); - nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); + nv_wr32(fifo, NV04_PFIFO_DELAY_0, 0x000000ff); + nv_wr32(fifo, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); - nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((priv->ramht->bits - 9) << 16) | - (priv->ramht->gpuobj.addr >> 8)); - nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); - nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8); + nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((fifo->ramht->bits - 9) << 16) | + (fifo->ramht->gpuobj.addr >> 8)); + nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8); + nv_wr32(fifo, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); - nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - nv_wr32(priv, NV03_PFIFO_CACHES, 1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV03_PFIFO_CACHES, 1); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h index e0e0c47cb4ca..cb4ec7bd7c51 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.h @@ -139,7 +139,7 @@ struct ramfc_desc { unsigned regp; }; -struct nv04_fifo_priv { +struct nv04_fifo { struct nvkm_fifo base; struct ramfc_desc *ramfc_desc; struct nvkm_ramht *ramht; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c index 3537accc927b..7c31c31edd9a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c @@ -58,7 +58,7 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, union { struct nv03_channel_dma_v0 v0; } *args = data; - struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo_chan *chan; int ret; @@ -86,10 +86,10 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent, nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 32; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, 
args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); - nv_wo32(priv->ramfc, chan->ramfc + 0x14, + nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(fifo->ramfc, chan->ramfc + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | #ifdef __BIG_ENDIAN @@ -144,25 +144,25 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_object **pobject) { struct nv04_instmem *imem = nv04_instmem(parent); - struct nv04_fifo_priv *priv; + struct nv04_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - nvkm_ramht_ref(imem->ramht, &priv->ramht); - nvkm_gpuobj_ref(imem->ramro, &priv->ramro); - nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); - - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &nv10_fifo_cclass; - nv_engine(priv)->sclass = nv10_fifo_sclass; - priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; - priv->ramfc_desc = nv10_ramfc; + nvkm_ramht_ref(imem->ramht, &fifo->ramht); + nvkm_gpuobj_ref(imem->ramro, &fifo->ramro); + nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc); + + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &nv10_fifo_cclass; + nv_engine(fifo)->sclass = nv10_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; + fifo->ramfc_desc = nv10_ramfc; return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c index e9c88da81f10..6f8787fbacc0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv17.c @@ -63,7 +63,7 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, union { struct nv03_channel_dma_v0 v0; } *args = data; - struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo_chan *chan; int ret; @@ -93,10 +93,10 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent, nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 64; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); - nv_wo32(priv->ramfc, chan->ramfc + 0x14, + nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(fifo->ramfc, chan->ramfc + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | #ifdef __BIG_ENDIAN @@ -151,55 +151,55 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_object **pobject) { struct nv04_instmem *imem = nv04_instmem(parent); - struct nv04_fifo_priv *priv; + struct nv04_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - nvkm_ramht_ref(imem->ramht, &priv->ramht); - 
nvkm_gpuobj_ref(imem->ramro, &priv->ramro); - nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); - - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &nv17_fifo_cclass; - nv_engine(priv)->sclass = nv17_fifo_sclass; - priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; - priv->ramfc_desc = nv17_ramfc; + nvkm_ramht_ref(imem->ramht, &fifo->ramht); + nvkm_gpuobj_ref(imem->ramro, &fifo->ramro); + nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc); + + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &nv17_fifo_cclass; + nv_engine(fifo)->sclass = nv17_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; + fifo->ramfc_desc = nv17_ramfc; return 0; } static int nv17_fifo_init(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object; + struct nv04_fifo *fifo = (void *)object; int ret; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; - nv_wr32(priv, NV04_PFIFO_DELAY_0, 0x000000ff); - nv_wr32(priv, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); + nv_wr32(fifo, NV04_PFIFO_DELAY_0, 0x000000ff); + nv_wr32(fifo, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff); - nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((priv->ramht->bits - 9) << 16) | - (priv->ramht->gpuobj.addr >> 8)); - nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); - nv_wr32(priv, NV03_PFIFO_RAMFC, priv->ramfc->addr >> 8 | 0x00010000); + nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((fifo->ramht->bits - 9) << 16) | + (fifo->ramht->gpuobj.addr >> 8)); + nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8); + nv_wr32(fifo, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8 | 0x00010000); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); - nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - nv_wr32(priv, NV03_PFIFO_CACHES, 1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV03_PFIFO_CACHES, 1); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c index f9456a5c762c..4c1ed3f29e6b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv40.c @@ -67,7 +67,7 @@ static int nv40_fifo_object_attach(struct nvkm_object *parent, struct nvkm_object *object, u32 handle) { - struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = (void *)parent; u32 context, chid = chan->base.chid; int ret; @@ -94,16 +94,16 @@ nv40_fifo_object_attach(struct nvkm_object *parent, context |= chid << 23; - mutex_lock(&nv_subdev(priv)->mutex); - ret = nvkm_ramht_insert(priv->ramht, chid, handle, context); - mutex_unlock(&nv_subdev(priv)->mutex); + mutex_lock(&nv_subdev(fifo)->mutex); + ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context); + mutex_unlock(&nv_subdev(fifo)->mutex); return ret; } static int nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) { - struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo *fifo = (void 
*)parent->engine; struct nv04_fifo_chan *chan = (void *)parent; unsigned long flags; u32 reg, ctx; @@ -123,16 +123,16 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx) return -EINVAL; } - spin_lock_irqsave(&priv->base.lock, flags); + spin_lock_irqsave(&fifo->base.lock, flags); nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4; - nv_mask(priv, 0x002500, 0x00000001, 0x00000000); + nv_mask(fifo, 0x002500, 0x00000001, 0x00000000); - if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid) - nv_wr32(priv, reg, nv_engctx(engctx)->addr); - nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr); + if ((nv_rd32(fifo, 0x003204) & fifo->base.max) == chan->base.chid) + nv_wr32(fifo, reg, nv_engctx(engctx)->addr); + nv_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr); - nv_mask(priv, 0x002500, 0x00000001, 0x00000001); - spin_unlock_irqrestore(&priv->base.lock, flags); + nv_mask(fifo, 0x002500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&fifo->base.lock, flags); return 0; } @@ -140,7 +140,7 @@ static int nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *engctx) { - struct nv04_fifo_priv *priv = (void *)parent->engine; + struct nv04_fifo *fifo = (void *)parent->engine; struct nv04_fifo_chan *chan = (void *)parent; unsigned long flags; u32 reg, ctx; @@ -160,15 +160,15 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend, return -EINVAL; } - spin_lock_irqsave(&priv->base.lock, flags); - nv_mask(priv, 0x002500, 0x00000001, 0x00000000); + spin_lock_irqsave(&fifo->base.lock, flags); + nv_mask(fifo, 0x002500, 0x00000001, 0x00000000); - if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid) - nv_wr32(priv, reg, 0x00000000); - nv_wo32(priv->ramfc, chan->ramfc + ctx, 0x00000000); + if ((nv_rd32(fifo, 0x003204) & fifo->base.max) == chan->base.chid) + nv_wr32(fifo, reg, 0x00000000); + nv_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000); - nv_mask(priv, 0x002500, 0x00000001, 0x00000001); - spin_unlock_irqrestore(&priv->base.lock, flags); + nv_mask(fifo, 0x002500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&fifo->base.lock, flags); return 0; } @@ -180,7 +180,7 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, union { struct nv03_channel_dma_v0 v0; } *args = data; - struct nv04_fifo_priv *priv = (void *)engine; + struct nv04_fifo *fifo = (void *)engine; struct nv04_fifo_chan *chan; int ret; @@ -210,17 +210,17 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, nv_parent(chan)->object_detach = nv04_fifo_object_detach; chan->ramfc = chan->base.chid * 128; - nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); - nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); - nv_wo32(priv->ramfc, chan->ramfc + 0x18, 0x30000000 | + nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset); + nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); + nv_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 | NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | #ifdef __BIG_ENDIAN NV_PFIFO_CACHE1_BIG_ENDIAN | #endif NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); - nv_wo32(priv->ramfc, chan->ramfc + 0x3c, 0x0001ffff); + nv_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff); return 0; } @@ -269,77 +269,77 @@ nv40_fifo_ctor(struct nvkm_object *parent, 
struct nvkm_object *engine, struct nvkm_object **pobject) { struct nv04_instmem *imem = nv04_instmem(parent); - struct nv04_fifo_priv *priv; + struct nv04_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - nvkm_ramht_ref(imem->ramht, &priv->ramht); - nvkm_gpuobj_ref(imem->ramro, &priv->ramro); - nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); - - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &nv40_fifo_cclass; - nv_engine(priv)->sclass = nv40_fifo_sclass; - priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; - priv->ramfc_desc = nv40_ramfc; + nvkm_ramht_ref(imem->ramht, &fifo->ramht); + nvkm_gpuobj_ref(imem->ramro, &fifo->ramro); + nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc); + + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &nv40_fifo_cclass; + nv_engine(fifo)->sclass = nv40_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; + fifo->ramfc_desc = nv40_ramfc; return 0; } static int nv40_fifo_init(struct nvkm_object *object) { - struct nv04_fifo_priv *priv = (void *)object; + struct nv04_fifo *fifo = (void *)object; struct nvkm_fb *fb = nvkm_fb(object); int ret; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; - nv_wr32(priv, 0x002040, 0x000000ff); - nv_wr32(priv, 0x002044, 0x2101ffff); - nv_wr32(priv, 0x002058, 0x00000001); + nv_wr32(fifo, 0x002040, 0x000000ff); + nv_wr32(fifo, 0x002044, 0x2101ffff); + nv_wr32(fifo, 0x002058, 0x00000001); - nv_wr32(priv, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | - ((priv->ramht->bits - 9) << 16) | - (priv->ramht->gpuobj.addr >> 8)); - nv_wr32(priv, NV03_PFIFO_RAMRO, priv->ramro->addr >> 8); + nv_wr32(fifo, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ | + ((fifo->ramht->bits - 9) << 16) | + (fifo->ramht->gpuobj.addr >> 8)); + nv_wr32(fifo, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8); - switch (nv_device(priv)->chipset) { + switch (nv_device(fifo)->chipset) { case 0x47: case 0x49: case 0x4b: - nv_wr32(priv, 0x002230, 0x00000001); + nv_wr32(fifo, 0x002230, 0x00000001); case 0x40: case 0x41: case 0x42: case 0x43: case 0x45: case 0x48: - nv_wr32(priv, 0x002220, 0x00030002); + nv_wr32(fifo, 0x002220, 0x00030002); break; default: - nv_wr32(priv, 0x002230, 0x00000000); - nv_wr32(priv, 0x002220, ((fb->ram->size - 512 * 1024 + - priv->ramfc->addr) >> 16) | + nv_wr32(fifo, 0x002230, 0x00000000); + nv_wr32(fifo, 0x002220, ((fb->ram->size - 512 * 1024 + + fifo->ramfc->addr) >> 16) | 0x00030000); break; } - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH1, priv->base.max); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max); - nv_wr32(priv, NV03_PFIFO_INTR_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_INTR_EN_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_0, 0xffffffff); + nv_wr32(fifo, NV03_PFIFO_INTR_EN_0, 0xffffffff); - nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0, 1); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - nv_wr32(priv, NV03_PFIFO_CACHES, 1); + nv_wr32(fifo, NV03_PFIFO_CACHE1_PUSH0, 1); + nv_wr32(fifo, NV04_PFIFO_CACHE1_PULL0, 1); + nv_wr32(fifo, NV03_PFIFO_CACHES, 1); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c index f25f0fd0655d..a36f7efc4658 100644 --- 
a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.c @@ -39,33 +39,33 @@ ******************************************************************************/ static void -nv50_fifo_playlist_update_locked(struct nv50_fifo_priv *priv) +nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo) { - struct nvkm_bar *bar = nvkm_bar(priv); + struct nvkm_bar *bar = nvkm_bar(fifo); struct nvkm_gpuobj *cur; int i, p; - cur = priv->playlist[priv->cur_playlist]; - priv->cur_playlist = !priv->cur_playlist; + cur = fifo->playlist[fifo->cur_playlist]; + fifo->cur_playlist = !fifo->cur_playlist; - for (i = priv->base.min, p = 0; i < priv->base.max; i++) { - if (nv_rd32(priv, 0x002600 + (i * 4)) & 0x80000000) + for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) { + if (nv_rd32(fifo, 0x002600 + (i * 4)) & 0x80000000) nv_wo32(cur, p++ * 4, i); } bar->flush(bar); - nv_wr32(priv, 0x0032f4, cur->addr >> 12); - nv_wr32(priv, 0x0032ec, p); - nv_wr32(priv, 0x002500, 0x00000101); + nv_wr32(fifo, 0x0032f4, cur->addr >> 12); + nv_wr32(fifo, 0x0032ec, p); + nv_wr32(fifo, 0x002500, 0x00000101); } void -nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) +nv50_fifo_playlist_update(struct nv50_fifo *fifo) { - mutex_lock(&nv_subdev(priv)->mutex); - nv50_fifo_playlist_update_locked(priv); - mutex_unlock(&nv_subdev(priv)->mutex); + mutex_lock(&nv_subdev(fifo)->mutex); + nv50_fifo_playlist_update_locked(fifo); + mutex_unlock(&nv_subdev(fifo)->mutex); } static int @@ -103,7 +103,7 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend, struct nvkm_object *object) { struct nvkm_bar *bar = nvkm_bar(parent); - struct nv50_fifo_priv *priv = (void *)parent->engine; + struct nv50_fifo *fifo = (void *)parent->engine; struct nv50_fifo_base *base = (void *)parent->parent; struct nv50_fifo_chan *chan = (void *)parent; u32 addr, me; @@ -129,17 +129,17 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend, * there's also a "ignore these engines" bitmask reg we can use * if we hit the issue there.. */ - me = nv_mask(priv, 0x00b860, 0x00000001, 0x00000001); + me = nv_mask(fifo, 0x00b860, 0x00000001, 0x00000001); /* do the kickoff... 
*/ - nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); - if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) { - nv_error(priv, "channel %d [%s] unload timeout\n", + nv_wr32(fifo, 0x0032fc, nv_gpuobj(base)->addr >> 12); + if (!nv_wait_ne(fifo, 0x0032fc, 0xffffffff, 0xffffffff)) { + nv_error(fifo, "channel %d [%s] unload timeout\n", chan->base.chid, nvkm_client_name(chan)); if (suspend) ret = -EBUSY; } - nv_wr32(priv, 0x00b860, me); + nv_wr32(fifo, 0x00b860, me); if (ret == 0) { nv_wo32(base->eng, addr + 0x00, 0x00000000); @@ -320,7 +320,7 @@ nv50_fifo_chan_dtor(struct nvkm_object *object) static int nv50_fifo_chan_init(struct nvkm_object *object) { - struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo *fifo = (void *)object->engine; struct nv50_fifo_base *base = (void *)object->parent; struct nv50_fifo_chan *chan = (void *)object; struct nvkm_gpuobj *ramfc = base->ramfc; @@ -331,22 +331,22 @@ nv50_fifo_chan_init(struct nvkm_object *object) if (ret) return ret; - nv_wr32(priv, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12); - nv50_fifo_playlist_update(priv); + nv_wr32(fifo, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 12); + nv50_fifo_playlist_update(fifo); return 0; } int nv50_fifo_chan_fini(struct nvkm_object *object, bool suspend) { - struct nv50_fifo_priv *priv = (void *)object->engine; + struct nv50_fifo *fifo = (void *)object->engine; struct nv50_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; /* remove channel from playlist, fifo will unload context */ - nv_mask(priv, 0x002600 + (chid * 4), 0x80000000, 0x00000000); - nv50_fifo_playlist_update(priv); - nv_wr32(priv, 0x002600 + (chid * 4), 0x00000000); + nv_mask(fifo, 0x002600 + (chid * 4), 0x80000000, 0x00000000); + nv50_fifo_playlist_update(fifo); + nv_wr32(fifo, 0x002600 + (chid * 4), 0x00000000); return nvkm_fifo_channel_fini(&chan->base, suspend); } @@ -456,69 +456,69 @@ nv50_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { - struct nv50_fifo_priv *priv; + struct nv50_fifo *fifo; int ret; - ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &priv); - *pobject = nv_object(priv); + ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo); + *pobject = nv_object(fifo); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, - &priv->playlist[0]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0, + &fifo->playlist[0]); if (ret) return ret; - ret = nvkm_gpuobj_new(nv_object(priv), NULL, 128 * 4, 0x1000, 0, - &priv->playlist[1]); + ret = nvkm_gpuobj_new(nv_object(fifo), NULL, 128 * 4, 0x1000, 0, + &fifo->playlist[1]); if (ret) return ret; - nv_subdev(priv)->unit = 0x00000100; - nv_subdev(priv)->intr = nv04_fifo_intr; - nv_engine(priv)->cclass = &nv50_fifo_cclass; - nv_engine(priv)->sclass = nv50_fifo_sclass; - priv->base.pause = nv04_fifo_pause; - priv->base.start = nv04_fifo_start; + nv_subdev(fifo)->unit = 0x00000100; + nv_subdev(fifo)->intr = nv04_fifo_intr; + nv_engine(fifo)->cclass = &nv50_fifo_cclass; + nv_engine(fifo)->sclass = nv50_fifo_sclass; + fifo->base.pause = nv04_fifo_pause; + fifo->base.start = nv04_fifo_start; return 0; } void nv50_fifo_dtor(struct nvkm_object *object) { - struct nv50_fifo_priv *priv = (void *)object; + struct nv50_fifo *fifo = (void *)object; - nvkm_gpuobj_ref(NULL, &priv->playlist[1]); - nvkm_gpuobj_ref(NULL, &priv->playlist[0]); + nvkm_gpuobj_ref(NULL, &fifo->playlist[1]); + 
nvkm_gpuobj_ref(NULL, &fifo->playlist[0]); - nvkm_fifo_destroy(&priv->base); + nvkm_fifo_destroy(&fifo->base); } int nv50_fifo_init(struct nvkm_object *object) { - struct nv50_fifo_priv *priv = (void *)object; + struct nv50_fifo *fifo = (void *)object; int ret, i; - ret = nvkm_fifo_init(&priv->base); + ret = nvkm_fifo_init(&fifo->base); if (ret) return ret; - nv_mask(priv, 0x000200, 0x00000100, 0x00000000); - nv_mask(priv, 0x000200, 0x00000100, 0x00000100); - nv_wr32(priv, 0x00250c, 0x6f3cfc34); - nv_wr32(priv, 0x002044, 0x01003fff); + nv_mask(fifo, 0x000200, 0x00000100, 0x00000000); + nv_mask(fifo, 0x000200, 0x00000100, 0x00000100); + nv_wr32(fifo, 0x00250c, 0x6f3cfc34); + nv_wr32(fifo, 0x002044, 0x01003fff); - nv_wr32(priv, 0x002100, 0xffffffff); - nv_wr32(priv, 0x002140, 0xbfffffff); + nv_wr32(fifo, 0x002100, 0xffffffff); + nv_wr32(fifo, 0x002140, 0xbfffffff); for (i = 0; i < 128; i++) - nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); - nv50_fifo_playlist_update_locked(priv); + nv_wr32(fifo, 0x002600 + (i * 4), 0x00000000); + nv50_fifo_playlist_update_locked(fifo); - nv_wr32(priv, 0x003200, 0x00000001); - nv_wr32(priv, 0x003250, 0x00000001); - nv_wr32(priv, 0x002500, 0x00000001); + nv_wr32(fifo, 0x003200, 0x00000001); + nv_wr32(fifo, 0x003250, 0x00000001); + nv_wr32(fifo, 0x002500, 0x00000001); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h index 09ed93c66567..722fcce7070e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv50.h @@ -2,7 +2,7 @@ #define __NV50_FIFO_H__ #include <engine/fifo.h> -struct nv50_fifo_priv { +struct nv50_fifo { struct nvkm_fifo base; struct nvkm_gpuobj *playlist[2]; int cur_playlist; @@ -23,7 +23,7 @@ struct nv50_fifo_chan { struct nvkm_ramht *ramht; }; -void nv50_fifo_playlist_update(struct nv50_fifo_priv *); +void nv50_fifo_playlist_update(struct nv50_fifo *); void nv50_fifo_object_detach(struct nvkm_object *, int); void nv50_fifo_chan_dtor(struct nvkm_object *); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 1ca9385f5479..b0b5fadfc550 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1121,7 +1121,7 @@ gf100_gr_ctxctl_isr(struct gf100_gr_priv *priv) static void gf100_gr_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct nvkm_handle *handle; @@ -1142,7 +1142,7 @@ gf100_gr_intr(struct nvkm_subdev *subdev) class = 0x0000; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & 0x00000001) { /* diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c index 535f5930c40b..57f05c86a591 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv10.c @@ -1119,18 +1119,18 @@ static void nv10_gr_tile_prog(struct nvkm_engine *engine, int i) { struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; - struct nvkm_fifo *pfifo = nvkm_fifo(engine); + struct nvkm_fifo *fifo = nvkm_fifo(engine); struct nv10_gr_priv *priv = (void *)engine; unsigned long flags; - pfifo->pause(pfifo, &flags); + fifo->pause(fifo, &flags); nv04_gr_idle(priv); nv_wr32(priv, NV10_PGRAPH_TLIMIT(i), tile->limit); 
nv_wr32(priv, NV10_PGRAPH_TSIZE(i), tile->pitch); nv_wr32(priv, NV10_PGRAPH_TILE(i), tile->addr); - pfifo->start(pfifo, &flags); + fifo->start(fifo, &flags); } const struct nvkm_bitfield nv10_gr_intr_name[] = { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c index 0aa4cc9f74e1..14a83f2a8127 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv20.c @@ -157,11 +157,11 @@ void nv20_gr_tile_prog(struct nvkm_engine *engine, int i) { struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; - struct nvkm_fifo *pfifo = nvkm_fifo(engine); + struct nvkm_fifo *fifo = nvkm_fifo(engine); struct nv20_gr_priv *priv = (void *)engine; unsigned long flags; - pfifo->pause(pfifo, &flags); + fifo->pause(fifo, &flags); nv04_gr_idle(priv); nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); @@ -181,7 +181,7 @@ nv20_gr_tile_prog(struct nvkm_engine *engine, int i) nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); } - pfifo->start(pfifo, &flags); + fifo->start(fifo, &flags); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c index ed05c6d7875b..c0a1751a1e88 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c @@ -206,11 +206,11 @@ static void nv40_gr_tile_prog(struct nvkm_engine *engine, int i) { struct nvkm_fb_tile *tile = &nvkm_fb(engine)->tile.region[i]; - struct nvkm_fifo *pfifo = nvkm_fifo(engine); + struct nvkm_fifo *fifo = nvkm_fifo(engine); struct nv40_gr_priv *priv = (void *)engine; unsigned long flags; - pfifo->pause(pfifo, &flags); + fifo->pause(fifo, &flags); nv04_gr_idle(priv); switch (nv_device(priv)->chipset) { @@ -277,13 +277,13 @@ nv40_gr_tile_prog(struct nvkm_engine *engine, int i) break; } - pfifo->start(pfifo, &flags); + fifo->start(fifo, &flags); } static void nv40_gr_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct nvkm_handle *handle = NULL; @@ -301,7 +301,7 @@ nv40_gr_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & NV_PGRAPH_INTR_ERROR) { if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c index f18b75b883ac..e232cb8e2f9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv50.c @@ -782,7 +782,7 @@ nv50_gr_trap_handler(struct nv50_gr_priv *priv, u32 display, static void nv50_gr_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct nvkm_handle *handle = NULL; @@ -798,7 +798,7 @@ nv50_gr_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & 0x00000010) { handle = nvkm_handle_get_class(engctx, class); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c index 891004157ea8..4199684a4b28 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c @@ -194,7 
+194,7 @@ void nv31_mpeg_intr(struct nvkm_subdev *subdev) { struct nv31_mpeg_priv *priv = (void *)subdev; - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_handle *handle; struct nvkm_object *engctx; u32 stat = nv_rd32(priv, 0x00b100); @@ -227,7 +227,7 @@ nv31_mpeg_intr(struct nvkm_subdev *subdev) if (show) { nv_error(priv, "ch %d [%s] 0x%08x 0x%08x 0x%08x 0x%08x\n", - pfifo->chid(pfifo, engctx), + fifo->chid(fifo, engctx), nvkm_client_name(engctx), stat, type, mthd, data); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c index 4720ac884468..aeed7f850f65 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c @@ -93,7 +93,7 @@ nv44_mpeg_cclass = { static void nv44_mpeg_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct nvkm_handle *handle; @@ -107,7 +107,7 @@ nv44_mpeg_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & 0x01000000) { /* happens on initial binding of the object */ diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c index 9d5c1b8b1f8c..a598d6dbff46 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec/g98.c @@ -75,7 +75,7 @@ static const struct nvkm_enum g98_sec_isr_error_name[] = { static void g98_sec_intr(struct nvkm_subdev *subdev) { - struct nvkm_fifo *pfifo = nvkm_fifo(subdev); + struct nvkm_fifo *fifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct g98_sec_priv *priv = (void *)subdev; @@ -90,7 +90,7 @@ g98_sec_intr(struct nvkm_subdev *subdev) int chid; engctx = nvkm_engctx_get(engine, inst); - chid = pfifo->chid(pfifo, engctx); + chid = fifo->chid(fifo, engctx); if (stat & 0x00000040) { nv_error(priv, "DISPATCH_ERROR ["); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c index 5dc637840a65..e17135a1ec83 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c @@ -297,7 +297,7 @@ calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate) int gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags) { - struct nvkm_fifo *pfifo = nvkm_fifo(clk); + struct nvkm_fifo *fifo = nvkm_fifo(clk); /* halt and idle execution engines */ nv_mask(clk, 0x020060, 0x00070000, 0x00000000); @@ -306,8 +306,8 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags) if (!nv_wait(clk, 0x000100, 0xffffffff, 0x00000000)) return -EBUSY; - if (pfifo) - pfifo->pause(pfifo, flags); + if (fifo) + fifo->pause(fifo, flags); if (!nv_wait(clk, 0x002504, 0x00000010, 0x00000010)) return -EIO; @@ -320,10 +320,10 @@ gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags) void gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags) { - struct nvkm_fifo *pfifo = nvkm_fifo(clk); + struct nvkm_fifo *fifo = nvkm_fifo(clk); - if (pfifo && flags) - pfifo->start(pfifo, flags); + if (fifo && flags) + fifo->start(fifo, flags); nv_mask(clk, 0x002504, 0x00000001, 0x00000000); nv_mask(clk, 0x020060, 0x00070000, 0x00040000); |
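
Editorial note (not part of the patch): the interrupt handlers touched above, such as gk104_fifo_intr() and nv04_fifo_intr(), all follow the same accumulate-and-clear dispatch shape: read the pending bits through the enable mask, handle and acknowledge each known bit while clearing it from the local copy, and finally report and mask off anything left over so an unknown source cannot storm. The sketch below is a minimal, self-contained illustration of that shape only, not nouveau code: the register file is a plain array, the offsets and handler names are assumptions made for the demo, and writes to the INTR register, which are write-1-to-clear on real hardware, are simply stored by the stub.

/*
 * Minimal sketch of the masked read / handle / ack / clear / warn pattern
 * used by the fifo interrupt handlers in this patch.  Everything here is
 * illustrative: fake MMIO backed by an array, made-up handler names.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_INTR	0x002100	/* pending interrupt bits */
#define REG_INTR_EN	0x002140	/* interrupt enable mask */

static uint32_t regs[0x4000];		/* fake MMIO space, indexed by offset/4 */

static uint32_t rd32(uint32_t addr) { return regs[addr >> 2]; }
static void wr32(uint32_t addr, uint32_t v) { regs[addr >> 2] = v; }
static void mask32(uint32_t addr, uint32_t m, uint32_t v)
{
	wr32(addr, (rd32(addr) & ~m) | v);
}

static void handle_bind_error(void)  { puts("BIND_ERROR handled"); }
static void handle_sched_error(void) { puts("SCHED_ERROR handled"); }

static void fifo_intr(void)
{
	uint32_t mask = rd32(REG_INTR_EN);
	uint32_t stat = rd32(REG_INTR) & mask;

	if (stat & 0x00000001) {
		handle_bind_error();
		wr32(REG_INTR, 0x00000001);	/* ack (W1C on real hw) */
		stat &= ~0x00000001;		/* drop from local copy */
	}

	if (stat & 0x00000100) {
		handle_sched_error();
		wr32(REG_INTR, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat) {				/* anything left is unknown: */
		printf("INTR 0x%08x\n", stat);	/* report it,                */
		mask32(REG_INTR_EN, stat, 0);	/* stop it from refiring,    */
		wr32(REG_INTR, stat);		/* and acknowledge it.       */
	}
}

int main(void)
{
	wr32(REG_INTR_EN, 0xffffffff);
	wr32(REG_INTR, 0x00000101 | 0x00800000);	/* two known bits + one unknown */
	fifo_intr();
	return 0;
}

Because every handled bit is removed from the local copy, the final if (stat) branch only ever fires for sources the handler does not know about, which is exactly what the "unknown intr" / "INTR 0x%08x" messages in the patch report.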
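
Editorial note (not part of the patch): the one change above that is slightly more than a rename is in nv04_fifo_pause()/nv04_fifo_start(), where the old blind "(void *)pfifo" cast becomes container_of(obj, typeof(*fifo), base). A minimal, self-contained sketch of that pattern follows; the struct layouts are simplified stand-ins for nvkm_fifo/nv04_fifo, and the container_of() definition is a stripped-down version of the macro the kernel headers normally provide.

/*
 * Sketch of the container_of() downcast adopted by nv04_fifo_pause().
 * Simplified stand-in structs; not the real nouveau definitions.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvkm_fifo {			/* common base, embedded in each chip variant */
	int min, max;
};

struct nv04_fifo {			/* chip-specific wrapper around the base */
	struct nvkm_fifo base;
	int ramfc_placeholder;		/* stands in for the real members */
};

/* Callers only hand us the base pointer... */
static void fifo_pause(struct nvkm_fifo *obj)
{
	/*
	 * ...and we recover the containing nv04_fifo from it.  Unlike the
	 * old "(void *)obj" cast, this stays correct even if 'base' is not
	 * the first member of struct nv04_fifo.
	 */
	struct nv04_fifo *fifo = container_of(obj, struct nv04_fifo, base);

	printf("pausing fifo, %d channels\n", fifo->base.max + 1);
}

int main(void)
{
	struct nv04_fifo fifo = { .base = { .min = 0, .max = 15 } };

	fifo_pause(&fifo.base);
	return 0;
}

The practical benefit over the cast is that the conversion keeps working if the embedded base member ever moves away from offset zero, which matters for a series whose stated purpose is to reshuffle these structures in later commits.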