drm/nouveau/gr: switch to new-style timer macros
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit c4584adc37
parent af3082b3c6
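Every hunk below applies the same conversion: the old nv_wait(gr, reg, mask, data) / nv_wait_ne() helpers become the nvkm_msec(device, ms, body) macro, which re-runs its body until the body executes break or the timeout (in milliseconds) expires, and which evaluates to a negative value on timeout. A minimal sketch of the two forms used in this commit follows; the helper name example_wait() and the <subdev/timer.h> include path are assumptions for illustration, while the registers and the 2000ms timeout are taken from the hunks themselves.

	#include <subdev/timer.h>	/* assumed home of nvkm_msec() in the nvkm include tree */

	/* Hypothetical helper, for illustration only. */
	static int
	example_wait(struct nvkm_device *device)
	{
		/* Unchecked wait: poll for up to 2000ms, ignore whether it timed out. */
		nvkm_msec(device, 2000,
			if (nvkm_rd32(device, 0x409800) & 0x00000010)
				break;
		);

		/* Checked wait: nvkm_msec() evaluates negative if "break" was never reached. */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
				break;
		) < 0)
			return -EBUSY;

		return 0;
	}

Because the wait condition is open-coded in the macro body, a caller can poll on any expression rather than a fixed mask/data compare, for example latching gr->size while polling, as in the fuc09 request 0x10 hunk below.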
@@ -1312,7 +1312,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x100cb8, (chan->addr + 0x1000) >> 8);
 	nvkm_wr32(device, 0x100cbc, 0x80000001);
-	nv_wait(gr, 0x100c80, 0x00008000, 0x00008000);
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x100c80) & 0x00008000)
+			break;
+	);
 
 	/* setup default state for mmio list construction */
 	info.gr = gr;
@@ -1326,8 +1329,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x409840, 0x00000030);
 	nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
 	nvkm_wr32(device, 0x409504, 0x00000003);
-	if (!nv_wait(gr, 0x409800, 0x00000010, 0x00000010))
-		nv_error(gr, "load_ctx timeout\n");
+	nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x00000010)
+			break;
+	);
 
 	nv_wo32(chan, 0x8001c, 1);
 	nv_wo32(chan, 0x80020, 0);
@@ -1338,8 +1343,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x409840, 0x80000000);
 		nvkm_wr32(device, 0x409500, 0x80000000 | chan->addr >> 12);
 		nvkm_wr32(device, 0x409504, 0x00000001);
-		if (!nv_wait(gr, 0x409800, 0x80000000, 0x80000000))
-			nv_error(gr, "HUB_SET_CHAN timeout\n");
+		nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x80000000)
+				break;
+		);
 	}
 
 	oclass->main(gr, &info);
@@ -1349,8 +1356,10 @@ gf100_grctx_generate(struct gf100_gr *gr)
 	 */
 	nvkm_mask(device, 0x409b04, 0x80000000, 0x00000000);
 	nvkm_wr32(device, 0x409000, 0x00000100);
-	if (!nv_wait(gr, 0x409b00, 0x80000000, 0x00000000)) {
-		nv_error(gr, "grctx template channel unload timeout\n");
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x409b00) & 0x80000000))
+			break;
+	) < 0) {
 		ret = -EBUSY;
 		goto done;
 	}
@@ -741,7 +741,10 @@ gf100_gr_icmd(struct gf100_gr *gr, const struct gf100_gr_pack *p)
 			 */
 			if ((addr & 0xffff) == 0xe100)
 				gf100_gr_wait_idle(gr);
-			nv_wait(gr, 0x400700, 0x00000004, 0x00000000);
+			nvkm_msec(device, 2000,
+				if (!(nvkm_rd32(device, 0x400700) & 0x00000004))
+					break;
+			);
 			addr += init->pitch;
 		}
 	}
@@ -1312,8 +1315,11 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 		nvkm_wr32(device, 0x40910c, 0x00000000);
 		nvkm_wr32(device, 0x41a100, 0x00000002);
 		nvkm_wr32(device, 0x409100, 0x00000002);
-		if (!nv_wait(gr, 0x409800, 0x00000001, 0x00000001))
-			nv_warn(gr, "0x409800 wait failed\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800) & 0x00000001)
+				break;
+		) < 0)
+			return -EBUSY;
 
 		nvkm_wr32(device, 0x409840, 0xffffffff);
 		nvkm_wr32(device, 0x409500, 0x7fffffff);
@@ -1322,54 +1328,59 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	nvkm_wr32(device, 0x409840, 0xffffffff);
 	nvkm_wr32(device, 0x409500, 0x00000000);
 	nvkm_wr32(device, 0x409504, 0x00000010);
-	if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-		nv_error(gr, "fuc09 req 0x10 timeout\n");
+	if (nvkm_msec(device, 2000,
+		if ((gr->size = nvkm_rd32(device, 0x409800)))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
-	gr->size = nvkm_rd32(device, 0x409800);
 
 	nvkm_wr32(device, 0x409840, 0xffffffff);
 	nvkm_wr32(device, 0x409500, 0x00000000);
 	nvkm_wr32(device, 0x409504, 0x00000016);
-	if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-		nv_error(gr, "fuc09 req 0x16 timeout\n");
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
 	nvkm_wr32(device, 0x409840, 0xffffffff);
 	nvkm_wr32(device, 0x409500, 0x00000000);
 	nvkm_wr32(device, 0x409504, 0x00000025);
-	if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-		nv_error(gr, "fuc09 req 0x25 timeout\n");
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800))
+			break;
+	) < 0)
 		return -EBUSY;
-	}
 
 	if (nv_device(gr)->chipset >= 0xe0) {
 		nvkm_wr32(device, 0x409800, 0x00000000);
 		nvkm_wr32(device, 0x409500, 0x00000001);
 		nvkm_wr32(device, 0x409504, 0x00000030);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x30 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
 		nvkm_wr32(device, 0x409810, 0xb00095c8);
 		nvkm_wr32(device, 0x409800, 0x00000000);
 		nvkm_wr32(device, 0x409500, 0x00000001);
 		nvkm_wr32(device, 0x409504, 0x00000031);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x31 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
			return -EBUSY;
-		}
 
 		nvkm_wr32(device, 0x409810, 0x00080420);
 		nvkm_wr32(device, 0x409800, 0x00000000);
 		nvkm_wr32(device, 0x409500, 0x00000001);
 		nvkm_wr32(device, 0x409504, 0x00000032);
-		if (!nv_wait_ne(gr, 0x409800, 0xffffffff, 0x00000000)) {
-			nv_error(gr, "fuc09 req 0x32 timeout\n");
+		if (nvkm_msec(device, 2000,
+			if (nvkm_rd32(device, 0x409800))
+				break;
+		) < 0)
 			return -EBUSY;
-		}
 
 		nvkm_wr32(device, 0x409614, 0x00000070);
 		nvkm_wr32(device, 0x409614, 0x00000770);
@@ -1425,8 +1436,10 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
 	/* start HUB ucode running, it'll init the GPCs */
 	nvkm_wr32(device, 0x40910c, 0x00000000);
 	nvkm_wr32(device, 0x409100, 0x00000002);
-	if (!nv_wait(gr, 0x409800, 0x80000000, 0x80000000)) {
-		nv_error(gr, "HUB_INIT timed out\n");
+	if (nvkm_msec(device, 2000,
+		if (nvkm_rd32(device, 0x409800) & 0x80000000)
+			break;
+	) < 0) {
 		gf100_gr_ctxctl_debug(gr);
 		return -EBUSY;
 	}
@@ -220,12 +220,20 @@ gk20a_gr_dtor(struct nvkm_object *object)
 static int
 gk20a_gr_wait_mem_scrubbing(struct gf100_gr *gr)
 {
-	if (!nv_wait(gr, 0x40910c, 0x6, 0x0)) {
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x40910c) & 0x00000006))
+			break;
+	) < 0) {
 		nv_error(gr, "FECS mem scrubbing timeout\n");
 		return -ETIMEDOUT;
 	}
 
-	if (!nv_wait(gr, 0x41a10c, 0x6, 0x0)) {
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x41a10c) & 0x00000006))
+			break;
+	) < 0) {
 		nv_error(gr, "GPCCS mem scrubbing timeout\n");
 		return -ETIMEDOUT;
 	}
@@ -1206,7 +1206,10 @@ nv04_gr_idle(void *obj)
 	if (nv_device(obj)->card_type == NV_40)
 		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
 
-	if (!nv_wait(gr, NV04_PGRAPH_STATUS, mask, 0)) {
+	if (nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, NV04_PGRAPH_STATUS) & mask))
+			break;
+	) < 0) {
 		nv_error(gr, "idle timed out with status 0x%08x\n",
 			 nvkm_rd32(device, NV04_PGRAPH_STATUS));
 		return false;
@@ -127,7 +127,10 @@ nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
 	if (chan->chid == chid) {
 		nvkm_wr32(device, 0x400784, nv_gpuobj(chan)->addr >> 4);
 		nvkm_wr32(device, 0x400788, 0x00000002);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 		nvkm_wr32(device, 0x400144, 0x10000000);
 		nvkm_mask(device, 0x400148, 0xff000000, 0x1f000000);
 	}
@@ -289,12 +292,18 @@ nv20_gr_init(struct nvkm_object *object)
 		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x003d0000);
 		for (i = 0; i < 15; i++)
 			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	} else {
 		nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x02c80000);
 		for (i = 0; i < 32; i++)
 			nvkm_wr32(device, NV10_PGRAPH_RDI_DATA, 0x00000000);
-		nv_wait(gr, 0x400700, 0xffffffff, 0x00000000);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x400700))
+				break;
+		);
 	}
 
 	nvkm_wr32(device, NV03_PGRAPH_INTR , 0xFFFFFFFF);
@@ -167,7 +167,10 @@ nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
 		nvkm_wr32(device, 0x400784, inst);
 		nvkm_mask(device, 0x400310, 0x00000020, 0x00000020);
 		nvkm_mask(device, 0x400304, 0x00000001, 0x00000001);
-		if (!nv_wait(gr, 0x400300, 0x00000001, 0x00000000)) {
+		if (nvkm_msec(device, 2000,
+			if (!(nvkm_rd32(device, 0x400300) & 0x00000001))
+				break;
+		) < 0) {
 			u32 insn = nvkm_rd32(device, 0x400308);
 			nv_warn(gr, "ctxprog timeout 0x%08x\n", insn);
 			ret = -EBUSY;
@@ -285,8 +285,10 @@ g84_gr_tlb_flush(struct nvkm_engine *engine)
 
 
 	nvkm_wr32(device, 0x100c80, 0x00000001);
-	if (!nv_wait(gr, 0x100c80, 0x00000001, 0x00000000))
-		nv_error(gr, "vm flush timeout\n");
+	nvkm_msec(device, 2000,
+		if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
+			break;
+	);
 	nvkm_mask(device, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&gr->lock, flags);
 	return timeout ? -EBUSY : 0;