drm/nouveau/fifo: use explicit intr interfaces
More control, and shallower call-chain to get to the point.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
parent 0fc72ee9d8 · commit 2fc71a0566
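The change below swaps the engine-level trampoline for direct registration of the chip handler on the MC interrupt tree. A minimal sketch of the two models, using only names that appear in the diff (the helper name in the second part is hypothetical, and the bodies are abridged):

/* Old model: the engine's .intr hook pointed at a trampoline in base.c,
 * which forwarded to the chip hook through the fifo func table. */
static void
nvkm_fifo_intr(struct nvkm_engine *engine)
{
    struct nvkm_fifo *fifo = nvkm_fifo(engine);

    fifo->func->intr(fifo);    /* void (*intr)(struct nvkm_fifo *) */
}

/* New model: the chip hook has the nvkm_inth handler signature and is
 * registered once against the MC interrupt tree during oneinit; init/fini
 * then allow/block it.  nvkm_fifo_oneinit_intr is a hypothetical name for
 * illustration - in the diff this code sits inline in nvkm_fifo_oneinit(). */
static int
nvkm_fifo_oneinit_intr(struct nvkm_fifo *fifo)
{
    struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;

    if (!fifo->func->intr)
        return 0;

    /* irqreturn_t (*intr)(struct nvkm_inth *) */
    return nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
                         subdev, fifo->func->intr, &subdev->inth);
}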
@@ -193,19 +193,16 @@ nvkm_fifo_class_get(struct nvkm_oclass *oclass, int index, const struct nvkm_dev
    return c;
}

-static void
-nvkm_fifo_intr(struct nvkm_engine *engine)
-{
-    struct nvkm_fifo *fifo = nvkm_fifo(engine);
-    fifo->func->intr(fifo);
-}
-
static int
nvkm_fifo_fini(struct nvkm_engine *engine, bool suspend)
{
    struct nvkm_fifo *fifo = nvkm_fifo(engine);

+   nvkm_inth_block(&fifo->engine.subdev.inth);

    if (fifo->func->fini)
        fifo->func->fini(fifo);

    return 0;
}

@@ -213,7 +210,10 @@ static int
nvkm_fifo_init(struct nvkm_engine *engine)
{
    struct nvkm_fifo *fifo = nvkm_fifo(engine);

    fifo->func->init(fifo);

+   nvkm_inth_allow(&fifo->engine.subdev.inth);
    return 0;
}

@@ -290,6 +290,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
static int
nvkm_fifo_oneinit(struct nvkm_engine *engine)
{
+   struct nvkm_subdev *subdev = &engine->subdev;
+   struct nvkm_device *device = subdev->device;
    struct nvkm_fifo *fifo = nvkm_fifo(engine);
    struct nvkm_runl *runl;
    struct nvkm_engn *engn;

@@ -322,6 +324,16 @@ nvkm_fifo_oneinit(struct nvkm_engine *engine)
        }
    }

+   /* Register interrupt handler. */
+   if (fifo->func->intr) {
+       ret = nvkm_inth_add(&device->mc->intr, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
+                           subdev, fifo->func->intr, &subdev->inth);
+       if (ret) {
+           nvkm_error(subdev, "intr %d\n", ret);
+           return ret;
+       }
+   }
+
    if (fifo->func->oneinit)
        return fifo->func->oneinit(fifo);

@@ -366,7 +378,6 @@ nvkm_fifo = {
    .info = nvkm_fifo_info,
    .init = nvkm_fifo_init,
    .fini = nvkm_fifo_fini,
-   .intr = nvkm_fifo_intr,
    .base.sclass = nvkm_fifo_class_get,
};
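Each converted chip handler below follows the same pattern: recover the nvkm_fifo from the nvkm_inth embedded in its subdev, dispatch on the masked interrupt status, and report the interrupt as handled. A condensed sketch, not the full gf100/gk104 bodies ("xxxx" is a placeholder prefix):

static irqreturn_t
xxxx_fifo_intr(struct nvkm_inth *inth)
{
    struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
    struct nvkm_device *device = fifo->engine.subdev.device;
    u32 mask = nvkm_rd32(device, 0x002140);
    u32 stat = nvkm_rd32(device, 0x002100) & mask;

    /* ...per-bit handling exactly as in the chip-specific code below... */

    return IRQ_HANDLED;
}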
@@ -34,6 +34,7 @@
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/mc.h>
#include <engine/sw.h>

#include <nvif/class.h>

@@ -525,11 +526,11 @@ gf100_fifo_intr_engine(struct gf100_fifo *fifo)
    }
}

-static void
-gf100_fifo_intr(struct nvkm_fifo *base)
+static irqreturn_t
+gf100_fifo_intr(struct nvkm_inth *inth)
{
-   struct gf100_fifo *fifo = gf100_fifo(base);
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 mask = nvkm_rd32(device, 0x002140);
    u32 stat = nvkm_rd32(device, 0x002100) & mask;

@@ -542,7 +543,7 @@ gf100_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x00000100) {
-       gf100_fifo_intr_sched(fifo);
+       gf100_fifo_intr_sched(gf100_fifo(fifo));
        nvkm_wr32(device, 0x002100, 0x00000100);
        stat &= ~0x00000100;
    }

@@ -565,7 +566,7 @@ gf100_fifo_intr(struct nvkm_fifo *base)
        u32 mask = nvkm_rd32(device, 0x00259c);
        while (mask) {
            u32 unit = __ffs(mask);
-           gf100_fifo_intr_mmu_fault_unit(&fifo->base, unit);
+           gf100_fifo_intr_mmu_fault_unit(fifo, unit);
            nvkm_wr32(device, 0x00259c, (1 << unit));
            mask &= ~(1 << unit);
        }

@@ -576,7 +577,7 @@ gf100_fifo_intr(struct nvkm_fifo *base)
        u32 mask = nvkm_rd32(device, 0x0025a0);
        while (mask) {
            u32 unit = __ffs(mask);
-           gf100_fifo_intr_pbdma(fifo, unit);
+           gf100_fifo_intr_pbdma(gf100_fifo(fifo), unit);
            nvkm_wr32(device, 0x0025a0, (1 << unit));
            mask &= ~(1 << unit);
        }

@@ -584,12 +585,12 @@ gf100_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x40000000) {
-       gf100_fifo_intr_runlist(fifo);
+       gf100_fifo_intr_runlist(gf100_fifo(fifo));
        stat &= ~0x40000000;
    }

    if (stat & 0x80000000) {
-       gf100_fifo_intr_engine(fifo);
+       gf100_fifo_intr_engine(gf100_fifo(fifo));
        stat &= ~0x80000000;
    }

@@ -598,6 +599,8 @@ gf100_fifo_intr(struct nvkm_fifo *base)
        nvkm_mask(device, 0x002140, stat, 0x00000000);
        nvkm_wr32(device, 0x002100, stat);
    }
+
+   return IRQ_HANDLED;
}

static void
@@ -34,6 +34,7 @@
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

@@ -720,7 +721,7 @@ gk104_fifo_mmu_fault = {
};

static const struct nvkm_enum
-gk104_fifo_bind_reason[] = {
+gk104_fifo_intr_bind_reason[] = {
    { 0x01, "BIND_NOT_UNBOUND" },
    { 0x02, "SNOOP_WITHOUT_BAR1" },
    { 0x03, "UNBIND_WHILE_RUNNING" },

@@ -731,14 +732,12 @@ gk104_fifo_bind_reason[] = {
};

void
-gk104_fifo_intr_bind(struct gk104_fifo *fifo)
+gk104_fifo_intr_bind(struct nvkm_fifo *fifo)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-   struct nvkm_device *device = subdev->device;
-   u32 intr = nvkm_rd32(device, 0x00252c);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
+   u32 intr = nvkm_rd32(subdev->device, 0x00252c);
    u32 code = intr & 0x000000ff;
-   const struct nvkm_enum *en =
-       nvkm_enum_find(gk104_fifo_bind_reason, code);
+   const struct nvkm_enum *en = nvkm_enum_find(gk104_fifo_intr_bind_reason, code);

    nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

@@ -802,21 +801,22 @@ gk104_fifo_intr_sched(struct gk104_fifo *fifo)
}

void
-gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
+gk104_fifo_intr_chsw(struct nvkm_fifo *fifo)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 stat = nvkm_rd32(device, 0x00256c);

    nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
    nvkm_wr32(device, 0x00256c, stat);
}

-void
-gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
+static void
+gk104_fifo_intr_dropped_fault(struct nvkm_fifo *fifo)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-   struct nvkm_device *device = subdev->device;
-   u32 stat = nvkm_rd32(device, 0x00259c);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
+   u32 stat = nvkm_rd32(subdev->device, 0x00259c);

    nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

@@ -879,11 +879,11 @@ gk104_fifo_intr_engine(struct gk104_fifo *fifo)
    nvkm_fifo_uevent(&fifo->base);
}

-void
-gk104_fifo_intr(struct nvkm_fifo *base)
+irqreturn_t
+gk104_fifo_intr(struct nvkm_inth *inth)
{
-   struct gk104_fifo *fifo = gk104_fifo(base);
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 mask = nvkm_rd32(device, 0x002140);
    u32 stat = nvkm_rd32(device, 0x002100) & mask;

@@ -901,7 +901,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x00000100) {
-       gk104_fifo_intr_sched(fifo);
+       gk104_fifo_intr_sched(gk104_fifo(fifo));
        nvkm_wr32(device, 0x002100, 0x00000100);
        stat &= ~0x00000100;
    }

@@ -934,7 +934,7 @@ gk104_fifo_intr(struct nvkm_fifo *base)
        u32 mask = nvkm_rd32(device, 0x00259c);
        while (mask) {
            u32 unit = __ffs(mask);
-           fifo->func->intr_mmu_fault_unit(&fifo->base, unit);
+           fifo->func->intr_mmu_fault_unit(fifo, unit);
            nvkm_wr32(device, 0x00259c, (1 << unit));
            mask &= ~(1 << unit);
        }

@@ -945,8 +945,8 @@ gk104_fifo_intr(struct nvkm_fifo *base)
        u32 mask = nvkm_rd32(device, 0x0025a0);
        while (mask) {
            u32 unit = __ffs(mask);
-           gk104_fifo_intr_pbdma_0(fifo, unit);
-           gk104_fifo_intr_pbdma_1(fifo, unit);
+           gk104_fifo_intr_pbdma_0(gk104_fifo(fifo), unit);
+           gk104_fifo_intr_pbdma_1(gk104_fifo(fifo), unit);
            nvkm_wr32(device, 0x0025a0, (1 << unit));
            mask &= ~(1 << unit);
        }

@@ -954,13 +954,13 @@ gk104_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x40000000) {
-       gk104_fifo_intr_runlist(fifo);
+       gk104_fifo_intr_runlist(gk104_fifo(fifo));
        stat &= ~0x40000000;
    }

    if (stat & 0x80000000) {
        nvkm_wr32(device, 0x002100, 0x80000000);
-       gk104_fifo_intr_engine(fifo);
+       gk104_fifo_intr_engine(gk104_fifo(fifo));
        stat &= ~0x80000000;
    }

@@ -969,16 +969,15 @@ gk104_fifo_intr(struct nvkm_fifo *base)
        nvkm_mask(device, 0x002140, stat, 0x00000000);
        nvkm_wr32(device, 0x002100, stat);
    }
+
+   return IRQ_HANDLED;
}

void
gk104_fifo_fini(struct nvkm_fifo *base)
{
    struct gk104_fifo *fifo = gk104_fifo(base);
    struct nvkm_device *device = fifo->base.engine.subdev.device;
    flush_work(&fifo->recover.work);
    /* allow mmu fault interrupts, even when we're not using fifo */
    nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

void
@@ -66,9 +66,6 @@ void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
void gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
                              struct gk104_fifo_engine_status *status);
-void gk104_fifo_intr_bind(struct gk104_fifo *fifo);
-void gk104_fifo_intr_chsw(struct gk104_fifo *fifo);
-void gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo);
void gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit);
void gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit);
void gk104_fifo_intr_runlist(struct gk104_fifo *fifo);
@@ -33,6 +33,7 @@
#include <core/client.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/sw.h>

@@ -236,9 +237,9 @@ nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
}

static void
-nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
+nv04_fifo_intr_cache_error(struct nvkm_fifo *fifo, u32 chid, u32 get)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    struct nvkm_fifo_chan *chan;
    unsigned long flags;

@@ -263,12 +264,12 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)

    if (!(pull0 & 0x00000100) ||
        !nv04_fifo_swmthd(device, chid, mthd, data)) {
-       chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+       chan = nvkm_fifo_chan_chid(fifo, chid, &flags);
        nvkm_error(subdev, "CACHE_ERROR - "
                   "ch %d [%s] subc %d mthd %04x data %08x\n",
                   chid, chan ? chan->object.client->name : "unknown",
                   (mthd >> 13) & 7, mthd & 0x1ffc, data);
-       nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+       nvkm_fifo_chan_put(fifo, flags, &chan);
    }

    nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);

@@ -287,9 +288,9 @@ nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
}

static void
-nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
+nv04_fifo_intr_dma_pusher(struct nvkm_fifo *fifo, u32 chid)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 dma_get = nvkm_rd32(device, 0x003244);
    u32 dma_put = nvkm_rd32(device, 0x003240);

@@ -299,7 +300,7 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
    unsigned long flags;
    const char *name;

-   chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
+   chan = nvkm_fifo_chan_chid(fifo, chid, &flags);
    name = chan ? chan->object.client->name : "unknown";
    if (device->card_type == NV_50) {
        u32 ho_get = nvkm_rd32(device, 0x003328);

@@ -331,18 +332,18 @@ nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
            if (dma_get != dma_put)
                nvkm_wr32(device, 0x003244, dma_put);
    }
-   nvkm_fifo_chan_put(&fifo->base, flags, &chan);
+   nvkm_fifo_chan_put(fifo, flags, &chan);

    nvkm_wr32(device, 0x003228, 0x00000000);
    nvkm_wr32(device, 0x003220, 0x00000001);
    nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

-void
-nv04_fifo_intr(struct nvkm_fifo *base)
+irqreturn_t
+nv04_fifo_intr(struct nvkm_inth *inth)
{
-   struct nv04_fifo *fifo = nv04_fifo(base);
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
    u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;

@@ -351,16 +352,16 @@ nv04_fifo_intr(struct nvkm_fifo *base)
    reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
    nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

-   chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.chid->mask;
+   chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->chid->mask;
    get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

    if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
-       nv04_fifo_cache_error(fifo, chid, get);
+       nv04_fifo_intr_cache_error(fifo, chid, get);
        stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
    }

    if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
-       nv04_fifo_dma_pusher(fifo, chid);
+       nv04_fifo_intr_dma_pusher(fifo, chid);
        stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
    }

@@ -383,7 +384,7 @@ nv04_fifo_intr(struct nvkm_fifo *base)

        if (stat & 0x40000000) {
            nvkm_wr32(device, 0x002100, 0x40000000);
-           nvkm_fifo_uevent(&fifo->base);
+           nvkm_fifo_uevent(fifo);
            stat &= ~0x40000000;
        }
    }

@@ -395,6 +396,7 @@ nv04_fifo_intr(struct nvkm_fifo *base)
    }

    nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
+   return IRQ_HANDLED;
}

void
@@ -28,7 +28,7 @@ struct nvkm_fifo_func {
    void (*init)(struct nvkm_fifo *);
    void (*fini)(struct nvkm_fifo *);

-   void (*intr)(struct nvkm_fifo *);
+   irqreturn_t (*intr)(struct nvkm_inth *);
    void (*intr_mmu_fault_unit)(struct nvkm_fifo *, int unit);

    const struct nvkm_fifo_func_mmu_fault {

@@ -95,7 +95,7 @@ int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvk
int nv04_fifo_chid_ctor(struct nvkm_fifo *, int);
int nv04_fifo_runl_ctor(struct nvkm_fifo *);
void nv04_fifo_init(struct nvkm_fifo *);
-void nv04_fifo_intr(struct nvkm_fifo *);
+irqreturn_t nv04_fifo_intr(struct nvkm_inth *);
int nv04_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
void nv04_fifo_pause(struct nvkm_fifo *, unsigned long *);
void nv04_fifo_start(struct nvkm_fifo *, unsigned long *);

@@ -120,7 +120,9 @@ extern const struct nvkm_engn_func gf100_engn_sw;

int gk104_fifo_chid_nr(struct nvkm_fifo *);
int gk104_fifo_runl_ctor(struct nvkm_fifo *);
-void gk104_fifo_intr(struct nvkm_fifo *);
+irqreturn_t gk104_fifo_intr(struct nvkm_inth *);
+void gk104_fifo_intr_chsw(struct nvkm_fifo *);
+void gk104_fifo_intr_bind(struct nvkm_fifo *);
extern const struct nvkm_fifo_func_mmu_fault gk104_fifo_mmu_fault;
void gk104_fifo_fault(struct nvkm_fifo *, struct nvkm_fault_data *);
void gk104_fifo_recover_chan(struct nvkm_fifo *, int);
@@ -30,6 +30,7 @@
#include <core/memory.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/mc.h>
#include <subdev/top.h>

#include <nvif/class.h>

@@ -367,21 +368,20 @@ tu102_fifo_intr_ctxsw_timeout(struct gk104_fifo *fifo)
}

static void
-tu102_fifo_intr_sched(struct gk104_fifo *fifo)
+tu102_fifo_intr_sched(struct nvkm_fifo *fifo)
{
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-   struct nvkm_device *device = subdev->device;
-   u32 intr = nvkm_rd32(device, 0x00254c);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
+   u32 intr = nvkm_rd32(subdev->device, 0x00254c);
    u32 code = intr & 0x000000ff;

    nvkm_error(subdev, "SCHED_ERROR %02x\n", code);
}

-static void
-tu102_fifo_intr(struct nvkm_fifo *base)
+static irqreturn_t
+tu102_fifo_intr(struct nvkm_inth *inth)
{
-   struct gk104_fifo *fifo = gk104_fifo(base);
-   struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+   struct nvkm_fifo *fifo = container_of(inth, typeof(*fifo), engine.subdev.inth);
+   struct nvkm_subdev *subdev = &fifo->engine.subdev;
    struct nvkm_device *device = subdev->device;
    u32 mask = nvkm_rd32(device, 0x002140);
    u32 stat = nvkm_rd32(device, 0x002100) & mask;

@@ -393,7 +393,7 @@ tu102_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x00000002) {
-       tu102_fifo_intr_ctxsw_timeout(fifo);
+       tu102_fifo_intr_ctxsw_timeout(gk104_fifo(fifo));
        stat &= ~0x00000002;
    }

@@ -415,8 +415,8 @@ tu102_fifo_intr(struct nvkm_fifo *base)
        while (mask) {
            u32 unit = __ffs(mask);

-           gk104_fifo_intr_pbdma_0(fifo, unit);
-           gk104_fifo_intr_pbdma_1(fifo, unit);
+           gk104_fifo_intr_pbdma_0(gk104_fifo(fifo), unit);
+           gk104_fifo_intr_pbdma_1(gk104_fifo(fifo), unit);
            nvkm_wr32(device, 0x0025a0, (1 << unit));
            mask &= ~(1 << unit);
        }

@@ -424,13 +424,13 @@ tu102_fifo_intr(struct nvkm_fifo *base)
    }

    if (stat & 0x40000000) {
-       gk104_fifo_intr_runlist(fifo);
+       gk104_fifo_intr_runlist(gk104_fifo(fifo));
        stat &= ~0x40000000;
    }

    if (stat & 0x80000000) {
        nvkm_wr32(device, 0x002100, 0x80000000);
-       gk104_fifo_intr_engine(fifo);
+       gk104_fifo_intr_engine(gk104_fifo(fifo));
        stat &= ~0x80000000;
    }

@@ -439,6 +439,8 @@ tu102_fifo_intr(struct nvkm_fifo *base)
        nvkm_mask(device, 0x002140, stat, 0x00000000);
        nvkm_wr32(device, 0x002100, stat);
    }
+
+   return IRQ_HANDLED;
}

static const struct nvkm_fifo_func
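The MC interrupt tables below drop the trailing flag from their FIFO entries. A sketch of why, assuming the nvkm_intr_data layout used by the mc code at this point in the series (the field names here, particularly "legacy", are an assumption, not taken from this diff):

struct nvkm_intr_data {
    int type;      /* NVKM_ENGINE_xxx or NVKM_SUBDEV_xxx */
    int inst;
    int leaf;
    u32 mask;
    bool legacy;   /* route through the old subdev/engine ->intr() path */
};

/* With FIFO now registering its own nvkm_inth handler in nvkm_fifo_oneinit(),
 * the legacy routing is no longer wanted for it, hence:
 *
 *     { NVKM_ENGINE_FIFO , 0, 0, 0x00000100, true },
 * becomes
 *     { NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
 */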
@@ -41,7 +41,7 @@ g84_mc_intrs[] = {
    { NVKM_ENGINE_BSP   , 0, 0, 0x00008000, true },
    { NVKM_ENGINE_CIPHER, 0, 0, 0x00004000, true },
    { NVKM_ENGINE_GR    , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100 },
    { NVKM_ENGINE_MPEG  , 0, 0, 0x00000001, true },
    { NVKM_SUBDEV_FB    , 0, 0, 0x0002d101, true },
    { NVKM_SUBDEV_BUS   , 0, 0, 0x10000000, true },

@@ -41,7 +41,7 @@ g98_mc_intrs[] = {
    { NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
    { NVKM_ENGINE_SEC   , 0, 0, 0x00004000, true },
    { NVKM_ENGINE_GR    , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100 },
    { NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },
    { NVKM_SUBDEV_FB    , 0, 0, 0x0002d101, true },
    { NVKM_SUBDEV_BUS   , 0, 0, 0x10000000, true },

@@ -42,7 +42,7 @@ gf100_mc_intrs[] = {
    { NVKM_ENGINE_MSPDEC, 0, 0, 0x00020000, true },
    { NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
    { NVKM_ENGINE_GR    , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100 },
    { NVKM_ENGINE_CE    , 1, 0, 0x00000040, true },
    { NVKM_ENGINE_CE    , 0, 0, 0x00000020, true },
    { NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },

@@ -33,7 +33,7 @@ gk104_mc_reset[] = {
const struct nvkm_intr_data
gk104_mc_intrs[] = {
    { NVKM_ENGINE_DISP    , 0, 0, 0x04000000, true },
-   { NVKM_ENGINE_FIFO    , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO    , 0, 0, 0x00000100 },
    { NVKM_SUBDEV_PRIVRING, 0, 0, 0x40000000, true },
    { NVKM_SUBDEV_BUS     , 0, 0, 0x10000000, true },
    { NVKM_SUBDEV_FB      , 0, 0, 0x08002000, true },

@@ -26,7 +26,7 @@
const struct nvkm_intr_data
gp100_mc_intrs[] = {
    { NVKM_ENGINE_DISP    , 0, 0, 0x04000000, true },
-   { NVKM_ENGINE_FIFO    , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO    , 0, 0, 0x00000100 },
    { NVKM_SUBDEV_FAULT   , 0, 0, 0x00000200, true },
    { NVKM_SUBDEV_PRIVRING, 0, 0, 0x40000000, true },
    { NVKM_SUBDEV_BUS     , 0, 0, 0x10000000, true },

@@ -41,7 +41,7 @@ gt215_mc_intrs[] = {
    { NVKM_ENGINE_MSPDEC, 0, 0, 0x00020000, true },
    { NVKM_ENGINE_MSVLD , 0, 0, 0x00008000, true },
    { NVKM_ENGINE_GR    , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO  , 0, 0, 0x00000100 },
    { NVKM_ENGINE_MSPPP , 0, 0, 0x00000001, true },
    { NVKM_SUBDEV_FB    , 0, 0, 0x00429101, true },
    { NVKM_SUBDEV_BUS   , 0, 0, 0x10000000, true },

@@ -62,7 +62,7 @@ static const struct nvkm_intr_data
nv04_mc_intrs[] = {
    { NVKM_ENGINE_DISP , 0, 0, 0x01010000, true },
    { NVKM_ENGINE_GR   , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
    { NVKM_SUBDEV_BUS  , 0, 0, 0x10000000, true },
    { NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
    {}

@@ -27,7 +27,7 @@ static const struct nvkm_intr_data
nv11_mc_intrs[] = {
    { NVKM_ENGINE_DISP , 0, 0, 0x03010000, true },
    { NVKM_ENGINE_GR   , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
    { NVKM_SUBDEV_BUS  , 0, 0, 0x10000000, true },
    { NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },
    {}

@@ -35,7 +35,7 @@ const struct nvkm_intr_data
nv17_mc_intrs[] = {
    { NVKM_ENGINE_DISP , 0, 0, 0x03010000, true },
    { NVKM_ENGINE_GR   , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
    { NVKM_ENGINE_MPEG , 0, 0, 0x00000001, true },
    { NVKM_SUBDEV_BUS  , 0, 0, 0x10000000, true },
    { NVKM_SUBDEV_TIMER, 0, 0, 0x00100000, true },

@@ -27,7 +27,7 @@ static const struct nvkm_intr_data
nv50_mc_intrs[] = {
    { NVKM_ENGINE_DISP , 0, 0, 0x04000000, true },
    { NVKM_ENGINE_GR   , 0, 0, 0x00001000, true },
-   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100, true },
+   { NVKM_ENGINE_FIFO , 0, 0, 0x00000100 },
    { NVKM_ENGINE_MPEG , 0, 0, 0x00000001, true },
    { NVKM_SUBDEV_FB   , 0, 0, 0x00001101, true },
    { NVKM_SUBDEV_BUS  , 0, 0, 0x10000000, true },