author    Ben Skeggs <bskeggs@redhat.com>  2022-06-01 20:46:52 +1000
committer Ben Skeggs <bskeggs@redhat.com>  2022-11-09 10:44:35 +1000
commit    3ebd64aa3c4fe7fa2e73f6fa5f81490721a9c4e1
tree      6a06b9cadb1c6e5b42d05d1ff2e7f96eaafb1e4b
parent    727fd72f2402afe7cc320844b0aef165f7eb544e
drm/nouveau/intr: support multiple trees, and explicit interfaces
Turing adds a second top-level interrupt tree in HW, in addition to the
trees available via NV_PMC.  Most of the interrupts we care about are
exposed in both trees, but not all of them, and we have some rather nasty
hacks to route the fault buffer interrupts.

Ampere removes the NV_PMC trees entirely.

Here we add some infrastructure to be able to handle all of this more
cleanly, as well as providing more explicit control over handlers.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
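As an illustration of the consumer side of the explicit interface added
here, the sketch below shows how a subdev might register and unmask a
handler.  All hypo_* names, the tree pointer supplied by the caller, and
the NVKM_INTR_PRIO_NORMAL priority name are assumptions for the example,
not taken from this patch.

/* Hypothetical consumer of the new explicit interface. */
#include <core/intr.h>
#include <core/subdev.h>

struct hypo_engine {
	struct nvkm_subdev subdev;
	struct nvkm_inth inth;	/* one handler registration per tree */
};

static irqreturn_t
hypo_engine_intr(struct nvkm_inth *inth)
{
	struct hypo_engine *engine = container_of(inth, typeof(*engine), inth);

	/* Handle (and clear) the unit's own status here; nvkm_intr() has
	 * already ack'd the top-level leaf bit via intr->func->reset().
	 */
	nvkm_debug(&engine->subdev, "interrupt handled\n");
	return IRQ_HANDLED;
}

static int
hypo_engine_oneinit(struct hypo_engine *engine, struct nvkm_intr *tree)
{
	int ret;

	/* NVKM_INTR_SUBDEV has nvkm_intr_xlat() resolve this subdev's
	 * leaf/mask from the tree's nvkm_intr_data table (or via nvkm_top,
	 * for units routed through NVKM_SUBDEV_TOP entries).
	 */
	ret = nvkm_inth_add(tree, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
			    &engine->subdev, hypo_engine_intr, &engine->inth);
	if (ret)
		return ret;

	nvkm_inth_allow(&engine->inth);	/* unmask; nvkm_inth_block() undoes */
	return 0;
}

nvkm_inth_allow()/nvkm_inth_block() give the per-handler control the commit
message refers to, independent of the coarser per-type nvkm_intr_allow()/
nvkm_intr_block() also added by the patch.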
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/core')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/intr.c  284
1 file changed, 282 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/intr.c b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
index 89f601567749..ff95e018fa78 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/intr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/intr.c
@@ -20,19 +20,147 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/intr.h>
-
+#include <core/device.h>
+#include <core/subdev.h>
#include <subdev/pci.h>
+#include <subdev/top.h>
+
#include <subdev/mc.h>
+
+static int
+nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
+ enum nvkm_intr_type type, int *leaf, u32 *mask)
+{
+ struct nvkm_device *device = subdev->device;
+
+ if (type < NVKM_INTR_VECTOR_0) {
+ if (type == NVKM_INTR_SUBDEV) {
+ const struct nvkm_intr_data *data = intr->data;
+ struct nvkm_top_device *tdev;
+
+ while (data && data->mask) {
+ if (data->type == NVKM_SUBDEV_TOP) {
+ list_for_each_entry(tdev, &device->top->device, head) {
+ if (tdev->intr >= 0 &&
+ tdev->type == subdev->type &&
+ tdev->inst == subdev->inst) {
+ if (data->mask & BIT(tdev->intr)) {
+ *leaf = data->leaf;
+ *mask = BIT(tdev->intr);
+ return 0;
+ }
+ }
+ }
+ } else
+ if (data->type == subdev->type && data->inst == subdev->inst) {
+ *leaf = data->leaf;
+ *mask = data->mask;
+ return 0;
+ }
+
+ data++;
+ }
+ } else {
+ return -ENOSYS;
+ }
+ } else {
+ if (type < intr->leaves * sizeof(*intr->stat) * 8) {
+ *leaf = type / 32;
+ *mask = BIT(type % 32);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static struct nvkm_intr *
+nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
+{
+ struct nvkm_intr *intr;
+ int ret;
+
+ list_for_each_entry(intr, &subdev->device->intr.intr, head) {
+ ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
+ if (ret == 0)
+ return intr;
+ }
+
+ return NULL;
+}
+
+static void
+nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
+{
+ intr->mask[leaf] |= mask;
+ if (intr->func->allow) {
+ if (intr->func->reset)
+ intr->func->reset(intr, leaf, mask);
+ intr->func->allow(intr, leaf, mask);
+ }
+}
+
+void
+nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
+{
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_intr *intr;
+ unsigned long flags;
+ int leaf;
+ u32 mask;
+
+ intr = nvkm_intr_find(subdev, type, &leaf, &mask);
+ if (intr) {
+ nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
+ spin_lock_irqsave(&device->intr.lock, flags);
+ nvkm_intr_allow_locked(intr, leaf, mask);
+ spin_unlock_irqrestore(&device->intr.lock, flags);
+ }
+}
+
+static void
+nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
+{
+ intr->mask[leaf] &= ~mask;
+ if (intr->func->block)
+ intr->func->block(intr, leaf, mask);
+}
+
+void
+nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
+{
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_intr *intr;
+ unsigned long flags;
+ int leaf;
+ u32 mask;
+
+ intr = nvkm_intr_find(subdev, type, &leaf, &mask);
+ if (intr) {
+ nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
+ spin_lock_irqsave(&device->intr.lock, flags);
+ nvkm_intr_block_locked(intr, leaf, mask);
+ spin_unlock_irqrestore(&device->intr.lock, flags);
+ }
+}
+
static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
+ struct nvkm_intr *intr;
+
+ list_for_each_entry(intr, &device->intr.intr, head)
+ intr->func->rearm(intr);
nvkm_mc_intr_rearm(device);
}

static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
+ struct nvkm_intr *intr;
+
+ list_for_each_entry(intr, &device->intr.intr, head)
+ intr->func->unarm(intr);
nvkm_mc_intr_unarm(device);
}
@@ -40,9 +168,13 @@ static irqreturn_t
nvkm_intr(int irq, void *arg)
{
struct nvkm_device *device = arg;
+ struct nvkm_intr *intr;
+ struct nvkm_inth *inth;
irqreturn_t ret = IRQ_NONE;
- bool handled;
+ bool pending = false, handled;
+ int prio, leaf;

+ /* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
spin_lock(&device->intr.lock);
if (!device->intr.armed)
goto done_unlock;
@@ -50,20 +182,103 @@ nvkm_intr(int irq, void *arg)
nvkm_intr_unarm_locked(device);
nvkm_pci_msi_rearm(device);

+ /* Fetch pending interrupt masks. */
+ list_for_each_entry(intr, &device->intr.intr, head) {
+ if (intr->func->pending(intr))
+ pending = true;
+ }
+
nvkm_mc_intr(device, &handled);
if (handled)
ret = IRQ_HANDLED;

+ if (!pending)
+ goto done;
+
+ /* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
+ if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
+ goto done;
+
+ /* Execute handlers. */
+ for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
+ list_for_each_entry(inth, &device->intr.prio[prio], head) {
+ struct nvkm_intr *intr = inth->intr;
+
+ if (intr->stat[inth->leaf] & inth->mask) {
+ if (atomic_read(&inth->allowed)) {
+ if (intr->func->reset)
+ intr->func->reset(intr, inth->leaf, inth->mask);
+ if (inth->func(inth) == IRQ_HANDLED)
+ ret = IRQ_HANDLED;
+ }
+ }
+ }
+ }
+
+ /* Nothing handled? Some debugging/protection from IRQ storms is in order... */
+ if (ret == IRQ_NONE) {
+ list_for_each_entry(intr, &device->intr.intr, head) {
+ for (leaf = 0; leaf < intr->leaves; leaf++) {
+ if (intr->stat[leaf]) {
+ nvkm_warn(intr->subdev, "intr%d: %08x\n",
+ leaf, intr->stat[leaf]);
+ nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
+ }
+ }
+ }
+ }
+
+done:
+ /* Re-enable all top-level interrupt sources. */
nvkm_intr_rearm_locked(device);
done_unlock:
spin_unlock(&device->intr.lock);
return ret;
}

+int
+nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
+ struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
+{
+ struct nvkm_device *device = subdev->device;
+ int i;
+
+ intr->func = func;
+ intr->data = data;
+ intr->subdev = subdev;
+ intr->leaves = leaves;
+ intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
+ intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
+ if (!intr->stat || !intr->mask) {
+ kfree(intr->stat);
+ return -ENOMEM;
+ }
+
+ if (intr->subdev->debug >= NV_DBG_DEBUG) {
+ for (i = 0; i < intr->leaves; i++)
+ intr->mask[i] = ~0;
+ }
+
+ spin_lock_irq(&device->intr.lock);
+ list_add_tail(&intr->head, &device->intr.intr);
+ spin_unlock_irq(&device->intr.lock);
+ return 0;
+}
+
void
nvkm_intr_rearm(struct nvkm_device *device)
{
+ struct nvkm_intr *intr;
+ int i;
+
spin_lock_irq(&device->intr.lock);
+ list_for_each_entry(intr, &device->intr.intr, head) {
+ for (i = 0; intr->func->block && i < intr->leaves; i++) {
+ intr->func->block(intr, i, ~0);
+ intr->func->allow(intr, i, intr->mask[i]);
+ }
+ }
+
nvkm_intr_rearm_locked(device);
device->intr.armed = true;
spin_unlock_irq(&device->intr.lock);
@@ -98,6 +313,14 @@ nvkm_intr_install(struct nvkm_device *device)
void
nvkm_intr_dtor(struct nvkm_device *device)
{
+ struct nvkm_intr *intr, *intt;
+
+ list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
+ list_del(&intr->head);
+ kfree(intr->mask);
+ kfree(intr->stat);
+ }
+
if (device->intr.alloc)
free_irq(device->intr.irq, device);
}
@@ -105,5 +328,62 @@ nvkm_intr_dtor(struct nvkm_device *device)
void
nvkm_intr_ctor(struct nvkm_device *device)
{
+ int i;
+
+ INIT_LIST_HEAD(&device->intr.intr);
+ for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
+ INIT_LIST_HEAD(&device->intr.prio[i]);
+
spin_lock_init(&device->intr.lock);
+ device->intr.armed = false;
+}
+
+void
+nvkm_inth_block(struct nvkm_inth *inth)
+{
+ if (unlikely(!inth->intr))
+ return;
+
+ atomic_set(&inth->allowed, 0);
+}
+
+void
+nvkm_inth_allow(struct nvkm_inth *inth)
+{
+ struct nvkm_intr *intr = inth->intr;
+ unsigned long flags;
+
+ if (unlikely(!inth->intr))
+ return;
+
+ spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
+ if (!atomic_xchg(&inth->allowed, 1)) {
+ if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
+ nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
+ }
+ spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
+}
+
+int
+nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
+ struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
+{
+ struct nvkm_device *device = subdev->device;
+ int ret;
+
+ if (WARN_ON(inth->mask))
+ return -EBUSY;
+
+ ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
+ if (ret)
+ return ret;
+
+ nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
+ inth->leaf, inth->mask, subdev->name);
+
+ inth->intr = intr;
+ inth->func = func;
+ atomic_set(&inth->allowed, 0);
+ list_add_tail(&inth->head, &device->intr.prio[prio]);
+ return 0;
}
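
For the provider side, the sketch below shows how a chip implementation
might describe one top-level tree to nvkm_intr_add().  The hook signatures
are inferred from the calls in this patch; the register layout, the masks,
and every hypo_* name are invented for illustration.

/* Hypothetical provider of one top-level interrupt tree. */
#include <core/intr.h>
#include <core/device.h>

#define HYPO_INTR_TOP(i)    (0x000100 + (i) * 0x04)	/* per-leaf status */
#define HYPO_INTR_EN_SET(i) (0x000140 + (i) * 0x04)	/* write 1 to unmask */
#define HYPO_INTR_EN_CLR(i) (0x000180 + (i) * 0x04)	/* write 1 to mask */

static bool
hypo_intr_pending(struct nvkm_intr *intr)
{
	struct nvkm_device *device = intr->subdev->device;
	bool pending = false;
	int leaf;

	/* Latch each leaf into intr->stat[]; nvkm_intr() dispatches only
	 * handlers whose leaf/mask bits are set there.
	 */
	for (leaf = 0; leaf < intr->leaves; leaf++) {
		intr->stat[leaf] = nvkm_rd32(device, HYPO_INTR_TOP(leaf));
		if (intr->stat[leaf])
			pending = true;
	}

	return pending;
}

static void
hypo_intr_unarm(struct nvkm_intr *intr)
{
	/* Gate delivery of the entire tree off while nvkm_intr() runs. */
	nvkm_wr32(intr->subdev->device, 0x000200, 0x00000000);
}

static void
hypo_intr_rearm(struct nvkm_intr *intr)
{
	nvkm_wr32(intr->subdev->device, 0x000200, 0x00000001);
}

static void
hypo_intr_block(struct nvkm_intr *intr, int leaf, u32 mask)
{
	nvkm_wr32(intr->subdev->device, HYPO_INTR_EN_CLR(leaf), mask);
}

static void
hypo_intr_allow(struct nvkm_intr *intr, int leaf, u32 mask)
{
	nvkm_wr32(intr->subdev->device, HYPO_INTR_EN_SET(leaf), mask);
}

static const struct nvkm_intr_func
hypo_intr_func = {
	.pending = hypo_intr_pending,
	.unarm = hypo_intr_unarm,
	.rearm = hypo_intr_rearm,
	.block = hypo_intr_block,
	.allow = hypo_intr_allow,
};

static const struct nvkm_intr_data
hypo_intr_data[] = {
	/* FAULT owns bit 9 of leaf 0 in this (invented) layout... */
	{ .type = NVKM_SUBDEV_FAULT, .inst = 0, .leaf = 0, .mask = 0x00000200 },
	/* ...while units routed via device topology match NVKM_SUBDEV_TOP. */
	{ .type = NVKM_SUBDEV_TOP, .inst = 0, .leaf = 0, .mask = 0x00ff0000 },
	{}
};

static int
hypo_intr_init(struct nvkm_subdev *subdev, struct nvkm_intr *intr)
{
	/* Two 32-bit leaves of status/mask state. */
	return nvkm_intr_add(&hypo_intr_func, hypo_intr_data, subdev, 2, intr);
}

Latching status into intr->stat[] in the pending() hook is what lets
nvkm_intr() dispatch across all registered trees in priority order, and
lets the IRQ-storm fallback block exactly the leaf bits that reported
without being handled.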