drm/nouveau/fifo: tidy global PBDMA init
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent d67f3b9646
commit 965c41d911
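For orientation: this change retires the per-chipset gk104_fifo_pbdma_func .init step and adds a generic init_pbdmas hook to nvkm_fifo_func. The common fifo init code now builds a bitmask with one bit per runqueue (i.e. per PBDMA unit) and hands it to the chipset before calling its init(). A rough sketch of the resulting flow, condensed from the nvkm_fifo_init() and priv.h hunks below rather than copied verbatim:

	/* Sketch only -- condensed from the hunks below. */
	struct nvkm_runq *runq;
	u32 mask = 0;

	if (fifo->func->init_pbdmas) {
		nvkm_runq_foreach(runq, fifo)		/* one nvkm_runq per PBDMA */
			mask |= BIT(runq->id);		/* e.g. ids 0,1,2 -> mask 0x7 */

		fifo->func->init_pbdmas(fifo, mask);	/* chipset-specific PBDMA enable */
	}

	fifo->func->init(fifo);				/* remaining chipset init */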
@@ -184,6 +184,15 @@ static int
 nvkm_fifo_init(struct nvkm_engine *engine)
 {
 	struct nvkm_fifo *fifo = nvkm_fifo(engine);
+	struct nvkm_runq *runq;
+	u32 mask = 0;
+
+	if (fifo->func->init_pbdmas) {
+		nvkm_runq_foreach(runq, fifo)
+			mask |= BIT(runq->id);
+
+		fifo->func->init_pbdmas(fifo, mask);
+	}
 
 	fifo->func->init(fifo);
@@ -627,18 +627,16 @@ gf100_fifo_fini(struct nvkm_fifo *base)
 }
 
 static void
-gf100_fifo_init(struct nvkm_fifo *base)
+gf100_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
 {
-	struct gf100_fifo *fifo = gf100_fifo(base);
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	int i;
+	struct nvkm_device *device = fifo->engine.subdev.device;
 
 	/* Enable PBDMAs. */
-	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
-	nvkm_wr32(device, 0x002204, (1 << fifo->pbdma_nr) - 1);
+	nvkm_wr32(device, 0x000204, mask);
+	nvkm_wr32(device, 0x002204, mask);
 
 	/* Assign engines to PBDMAs. */
-	if (fifo->pbdma_nr >= 3) {
+	if ((mask & 7) == 7) {
 		nvkm_wr32(device, 0x002208, ~(1 << 0)); /* PGRAPH */
 		nvkm_wr32(device, 0x00220c, ~(1 << 1)); /* PVP */
 		nvkm_wr32(device, 0x002210, ~(1 << 1)); /* PMSPP */
@@ -647,6 +645,15 @@ gf100_fifo_init(struct nvkm_fifo *base)
 		nvkm_wr32(device, 0x00221c, ~(1 << 1)); /* PCE1 */
 	}
 
+}
+
+static void
+gf100_fifo_init(struct nvkm_fifo *base)
+{
+	struct gf100_fifo *fifo = gf100_fifo(base);
+	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	int i;
+
 	/* PBDMA[n] */
 	for (i = 0; i < fifo->pbdma_nr; i++) {
 		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
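One behavioural detail in the gf100 hunks above: the engine-assignment guard changes from checking fifo->pbdma_nr >= 3 to checking (mask & 7) == 7. The two agree as long as the core hands gf100 a dense mask, i.e. runqueue ids run 0..pbdma_nr-1 — which appears to be the case, since nvkm_runq_new() is keyed by pbid (see the runq.h hunk below) and nvkm_fifo_init() ORs BIT(runq->id) for every runqueue. A small illustration with a hypothetical helper (not from the patch), under exactly that assumption:

	/* Hypothetical helper, assuming runq ids 0..pbdma_nr-1. */
	static bool
	gf100_mask_covers_pbdmas_0_to_2(int pbdma_nr)
	{
		u32 mask = 0;
		int i;

		for (i = 0; i < pbdma_nr; i++)
			mask |= BIT(i);		/* same value as (1 << pbdma_nr) - 1 */

		return (mask & 7) == 7;		/* true exactly when pbdma_nr >= 3 */
	}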
@@ -756,6 +763,7 @@ gf100_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gf100_fifo_runl_ctor,
 	.init = gf100_fifo_init,
+	.init_pbdmas = gf100_fifo_init_pbdmas,
 	.fini = gf100_fifo_fini,
 	.intr = gf100_fifo_intr,
 	.mmu_fault = &gf100_fifo_mmu_fault,
@@ -278,16 +278,8 @@ static const struct nvkm_runl_func
 gk104_runl = {
 };
 
-void
-gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
-{
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
-}
-
 const struct gk104_fifo_pbdma_func
 gk104_fifo_pbdma = {
-	.init = gk104_fifo_pbdma_init,
 };
 
 int
@@ -962,6 +954,14 @@ gk104_fifo_fini(struct nvkm_fifo *base)
 	flush_work(&fifo->recover.work);
 }
 
+void
+gk104_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
+{
+	struct nvkm_device *device = fifo->engine.subdev.device;
+
+	nvkm_wr32(device, 0x000204, mask);
+}
+
 void
 gk104_fifo_init(struct nvkm_fifo *base)
 {
@@ -969,9 +969,6 @@ gk104_fifo_init(struct nvkm_fifo *base)
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	int i;
 
-	/* Enable PBDMAs. */
-	fifo->func->pbdma->init(fifo);
-
 	/* PBDMA[n] */
 	for (i = 0; i < fifo->pbdma_nr; i++) {
 		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
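Net effect of the two gk104.c hunks above: the 0x000204 PBDMA-enable write moves out of gk104_fifo_pbdma_init() — which gk104_fifo_init() used to reach through fifo->func->pbdma->init() — and into gk104_fifo_init_pbdmas(), which the core now calls before init(). Either way the enable still lands before the per-unit PBDMA[n] setup loop; roughly (call-order sketch, not code from the patch):

	/*
	 * Before:  gk104_fifo_init()
	 *            -> fifo->func->pbdma->init(fifo)        (0x000204 enable)
	 *            -> PBDMA[n] loop                         (0x04013c, ...)
	 *
	 * After:   nvkm_fifo_init()
	 *            -> fifo->func->init_pbdmas(fifo, mask)   (0x000204 enable)
	 *            -> fifo->func->init(fifo)
	 *                 -> PBDMA[n] loop                     (0x04013c, ...)
	 */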
@@ -1147,6 +1144,7 @@ gk104_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
@@ -75,7 +75,6 @@ void gk104_fifo_init(struct nvkm_fifo *base);
 void gk104_fifo_fini(struct nvkm_fifo *base);
 
 extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
-void gk104_fifo_pbdma_init(struct gk104_fifo *);
 extern const struct nvkm_enum gk104_fifo_fault_access[];
 extern const struct nvkm_enum gk104_fifo_fault_engine[];
 extern const struct nvkm_enum gk104_fifo_fault_reason[];
@@ -83,6 +83,7 @@ gk110_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
@@ -39,7 +39,6 @@ gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
 
 const struct gk104_fifo_pbdma_func
 gk208_fifo_pbdma = {
-	.init = gk104_fifo_pbdma_init,
 	.init_timeout = gk208_fifo_pbdma_init_timeout,
 };
 
@@ -62,6 +61,7 @@ gk208_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
@@ -33,6 +33,7 @@ gk20a_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gf100_fifo_intr_mmu_fault_unit,
@@ -123,6 +123,7 @@ gm107_fifo = {
 	.runq_nr = gf100_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
@@ -34,7 +34,6 @@ gm200_fifo_runq_nr(struct nvkm_fifo *fifo)
 
 const struct gk104_fifo_pbdma_func
 gm200_fifo_pbdma = {
-	.init = gk104_fifo_pbdma_init,
 	.init_timeout = gk208_fifo_pbdma_init_timeout,
 };
 
@@ -53,6 +52,7 @@ gm200_fifo = {
 	.runq_nr = gm200_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gm107_fifo_intr_mmu_fault_unit,
@@ -95,6 +95,7 @@ gp100_fifo = {
 	.runq_nr = gm200_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.intr_mmu_fault_unit = gp100_fifo_intr_mmu_fault_unit,
@@ -323,6 +323,7 @@ gv100_fifo = {
 	.runq_nr = gm200_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = gk104_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = gk104_fifo_intr,
 	.mmu_fault = &gv100_fifo_mmu_fault,
@@ -25,6 +25,8 @@ struct nvkm_fifo_func {
 	int (*runl_ctor)(struct nvkm_fifo *);
 
 	void (*init)(struct nvkm_fifo *);
+	void (*init_pbdmas)(struct nvkm_fifo *, u32 mask);
+
 	void (*fini)(struct nvkm_fifo *);
 
 	irqreturn_t (*intr)(struct nvkm_inth *);
@@ -58,7 +60,6 @@ struct nvkm_fifo_func {
 	} *runlist;
 
 	const struct gk104_fifo_pbdma_func {
-		void (*init)(struct gk104_fifo *);
 		void (*init_timeout)(struct gk104_fifo *);
 	} *pbdma;
 
@@ -121,6 +122,7 @@ extern const struct nvkm_engn_func gf100_engn_sw;
 
 int gk104_fifo_chid_nr(struct nvkm_fifo *);
 int gk104_fifo_runl_ctor(struct nvkm_fifo *);
+void gk104_fifo_init_pbdmas(struct nvkm_fifo *, u32);
 irqreturn_t gk104_fifo_intr(struct nvkm_inth *);
 void gk104_fifo_intr_chsw(struct nvkm_fifo *);
 void gk104_fifo_intr_bind(struct nvkm_fifo *);
@@ -15,6 +15,7 @@ struct nvkm_runq {
 struct nvkm_runq *nvkm_runq_new(struct nvkm_fifo *, int pbid);
 void nvkm_runq_del(struct nvkm_runq *);
 
+#define nvkm_runq_foreach(runq,fifo) list_for_each_entry((runq), &(fifo)->runqs, head)
 #define nvkm_runq_foreach_cond(runq,fifo,cond) nvkm_list_foreach(runq, &(fifo)->runqs, head, (cond))
 
 #define RUNQ_PRINT(r,l,p,f,a...) \
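The nvkm_runq_foreach() macro added above is what the new nvkm_fifo_init() code uses to build the PBDMA mask; it is a thin wrapper over list_for_each_entry() on the fifo's runq list. Expanded, the loop from the first hunk reads roughly as follows (runq/mask names as used there):

	struct nvkm_runq *runq;
	u32 mask = 0;

	/* nvkm_runq_foreach(runq, fifo) expands to this list walk: */
	list_for_each_entry(runq, &fifo->runqs, head)
		mask |= BIT(runq->id);	/* runq->id names the backing PBDMA unit */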
@@ -94,18 +94,8 @@ tu102_fifo_fault_engine[] = {
 	{}
 };
 
-static void
-tu102_fifo_pbdma_init(struct gk104_fifo *fifo)
-{
-	struct nvkm_device *device = fifo->base.engine.subdev.device;
-	const u32 mask = (1 << fifo->pbdma_nr) - 1;
-	/*XXX: this is a bit of a guess at this point in time. */
-	nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
-}
-
 static const struct gk104_fifo_pbdma_func
 tu102_fifo_pbdma = {
-	.init = tu102_fifo_pbdma_init,
 	.init_timeout = gk208_fifo_pbdma_init_timeout,
 };
 
@@ -445,6 +435,13 @@ tu102_fifo_intr(struct nvkm_inth *inth)
 	return IRQ_HANDLED;
 }
 
+static void
+tu102_fifo_init_pbdmas(struct nvkm_fifo *fifo, u32 mask)
+{
+	/* Not directly related to PBDMAs, but, enables doorbell to function. */
+	nvkm_mask(fifo->engine.subdev.device, 0xb65000, 0x80000000, 0x80000000);
+}
+
 static const struct nvkm_fifo_func
 tu102_fifo = {
 	.dtor = gk104_fifo_dtor,
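Worth noting from the two tu102.c hunks above: the removed tu102_fifo_pbdma_init() masked register 0xb65000 with 0x80000fff and wrote 0x80000000 | mask, with mask derived from pbdma_nr, while the replacement tu102_fifo_init_pbdmas() only sets bit 31 (the doorbell enable, per its comment) and leaves the low mask bits alone. Side by side, copied from the hunks and trimmed to the two register accesses:

	/* Before (removed above): */
	nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);

	/* After (added above): */
	nvkm_mask(fifo->engine.subdev.device, 0xb65000, 0x80000000, 0x80000000);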
@@ -454,6 +451,7 @@ tu102_fifo = {
 	.runq_nr = gm200_fifo_runq_nr,
 	.runl_ctor = gk104_fifo_runl_ctor,
 	.init = gk104_fifo_init,
+	.init_pbdmas = tu102_fifo_init_pbdmas,
 	.fini = gk104_fifo_fini,
 	.intr = tu102_fifo_intr,
 	.mmu_fault = &tu102_fifo_mmu_fault,