drm/nouveau/fifo: add chan/cgrp preempt()
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
This commit is contained in:
parent
67059b9fb8
commit
acff941535
@ -33,17 +33,6 @@
|
||||
#include <nvif/cl0080.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
void
|
||||
nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
|
||||
{
|
||||
unsigned long flags;
|
||||
if (WARN_ON(!fifo->func->recover_chan))
|
||||
return;
|
||||
spin_lock_irqsave(&fifo->lock, flags);
|
||||
fifo->func->recover_chan(fifo, chid);
|
||||
spin_unlock_irqrestore(&fifo->lock, flags);
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
|
||||
{
|
||||
|
@ -22,6 +22,7 @@ struct nvkm_ectx {
|
||||
|
||||
struct nvkm_cgrp {
|
||||
const struct nvkm_cgrp_func {
|
||||
void (*preempt)(struct nvkm_cgrp *);
|
||||
} *func;
|
||||
char name[64];
|
||||
struct nvkm_runl *runl;
|
||||
|
@ -222,6 +222,7 @@ nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct n
|
||||
nvkm_runl_block(runl);
|
||||
else
|
||||
nvkm_chan_block(chan);
|
||||
nvkm_chan_preempt(chan, true);
|
||||
|
||||
/* Update context pointer. */
|
||||
if (cctx)
|
||||
@ -300,6 +301,33 @@ done:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
|
||||
{
|
||||
struct nvkm_runl *runl = chan->cgrp->runl;
|
||||
|
||||
CHAN_TRACE(chan, "preempt");
|
||||
chan->func->preempt(chan);
|
||||
if (!wait)
|
||||
return 0;
|
||||
|
||||
return nvkm_runl_preempt_wait(runl);
|
||||
}
|
||||
|
||||
int
|
||||
nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!chan->func->preempt)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&chan->cgrp->runl->mutex);
|
||||
ret = nvkm_chan_preempt_locked(chan, wait);
|
||||
mutex_unlock(&chan->cgrp->runl->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
|
||||
enum nvkm_object_map *type, u64 *addr, u64 *size)
|
||||
@ -346,6 +374,8 @@ nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
|
||||
if (atomic_inc_return(&chan->errored) == 1) {
|
||||
CHAN_ERROR(chan, "errored - disabling channel");
|
||||
nvkm_chan_block_locked(chan);
|
||||
if (preempt)
|
||||
chan->func->preempt(chan);
|
||||
nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
|
||||
}
|
||||
spin_unlock_irqrestore(&chan->lock, flags);
|
||||
|
@ -20,6 +20,7 @@ struct nvkm_chan_func {
|
||||
void (*unbind)(struct nvkm_chan *);
|
||||
void (*start)(struct nvkm_chan *);
|
||||
void (*stop)(struct nvkm_chan *);
|
||||
void (*preempt)(struct nvkm_chan *);
|
||||
u32 (*doorbell_handle)(struct nvkm_chan *);
|
||||
|
||||
void *(*dtor)(struct nvkm_fifo_chan *);
|
||||
@ -43,6 +44,8 @@ void nvkm_chan_del(struct nvkm_chan **);
|
||||
void nvkm_chan_allow(struct nvkm_chan *);
|
||||
void nvkm_chan_block(struct nvkm_chan *);
|
||||
void nvkm_chan_error(struct nvkm_chan *, bool preempt);
|
||||
int nvkm_chan_preempt(struct nvkm_chan *, bool wait);
|
||||
int nvkm_chan_preempt_locked(struct nvkm_chan *, bool wait);
|
||||
int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
|
||||
struct nvkm_client * /*TODO: remove need for this */);
|
||||
void nvkm_chan_cctx_put(struct nvkm_chan *, struct nvkm_cctx **);
|
||||
|
@ -33,8 +33,6 @@ int gk104_fifo_gpfifo_engine_ctor(struct nvkm_fifo_chan *, struct nvkm_engine *,
|
||||
struct nvkm_object *);
|
||||
void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
|
||||
struct nvkm_engine *);
|
||||
int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *);
|
||||
int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
|
||||
|
||||
int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
|
||||
void *data, u32 size, struct nvkm_object **);
|
||||
|
@ -39,6 +39,12 @@
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
void
|
||||
gf100_chan_preempt(struct nvkm_chan *chan)
|
||||
{
|
||||
nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id);
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_chan_stop(struct nvkm_chan *chan)
|
||||
{
|
||||
@ -83,6 +89,7 @@ gf100_chan = {
|
||||
.unbind = gf100_chan_unbind,
|
||||
.start = gf100_chan_start,
|
||||
.stop = gf100_chan_stop,
|
||||
.preempt = gf100_chan_preempt,
|
||||
};
|
||||
|
||||
static const struct nvkm_engn_func
|
||||
@ -158,6 +165,12 @@ gf100_runq = {
|
||||
.intr_0_names = gf100_runq_intr_0_names,
|
||||
};
|
||||
|
||||
bool
|
||||
gf100_runl_preempt_pending(struct nvkm_runl *runl)
|
||||
{
|
||||
return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000;
|
||||
}
|
||||
|
||||
static void
|
||||
gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
|
||||
{
|
||||
@ -238,6 +251,7 @@ gf100_runl = {
|
||||
.pending = gf100_runl_pending,
|
||||
.block = gf100_runl_block,
|
||||
.allow = gf100_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
static void
|
||||
|
@ -86,6 +86,7 @@ gk104_chan = {
|
||||
.unbind = gk104_chan_unbind,
|
||||
.start = gk104_chan_start,
|
||||
.stop = gk104_chan_stop,
|
||||
.preempt = gf100_chan_preempt,
|
||||
};
|
||||
|
||||
void
|
||||
@ -366,6 +367,7 @@ gk104_runl = {
|
||||
.pending = gk104_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
int
|
||||
|
@ -30,19 +30,41 @@
|
||||
#include "changk104.h"
|
||||
|
||||
#include <core/memory.h>
|
||||
#include <subdev/timer.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
|
||||
void
|
||||
gk110_chan_preempt(struct nvkm_chan *chan)
|
||||
{
|
||||
struct nvkm_cgrp *cgrp = chan->cgrp;
|
||||
|
||||
if (cgrp->hw) {
|
||||
cgrp->func->preempt(cgrp);
|
||||
return;
|
||||
}
|
||||
|
||||
gf100_chan_preempt(chan);
|
||||
}
|
||||
|
||||
/* gk110 channel ops: reuses the gk104 bind/start/stop paths, but
 * preempts via gk110_chan_preempt() to get TSG-aware preemption.
 */
const struct nvkm_chan_func
gk110_chan = {
	.bind = gk104_chan_bind,
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
	.preempt = gk110_chan_preempt,
};
|
||||
|
||||
static void
|
||||
gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)
|
||||
{
|
||||
nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);
|
||||
}
|
||||
|
||||
/* gk110 channel-group ops: only TSG preemption is implemented. */
const struct nvkm_cgrp_func
gk110_cgrp = {
	.preempt = gk110_cgrp_preempt,
};
|
||||
|
||||
void
|
||||
@ -68,6 +90,7 @@ gk110_runl = {
|
||||
.pending = gk104_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
int
|
||||
|
@ -38,6 +38,7 @@ gm107_chan = {
|
||||
.unbind = gk104_chan_unbind,
|
||||
.start = gk104_chan_start,
|
||||
.stop = gk104_chan_stop,
|
||||
.preempt = gk110_chan_preempt,
|
||||
};
|
||||
|
||||
static void
|
||||
@ -62,6 +63,7 @@ gm107_runl = {
|
||||
.pending = gk104_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
static const struct nvkm_enum
|
||||
|
@ -35,6 +35,7 @@ gp100_runl = {
|
||||
.pending = gk104_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
static const struct nvkm_enum
|
||||
|
@ -62,26 +62,9 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
|
||||
{
|
||||
const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
|
||||
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
|
||||
struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
struct nvkm_gpuobj *inst = chan->base.inst;
|
||||
int ret = 0;
|
||||
|
||||
mutex_lock(&chan->fifo->base.mutex);
|
||||
nvkm_wr32(device, 0x002634, chan->base.chid);
|
||||
if (nvkm_msec(device, 2000,
|
||||
if (nvkm_rd32(device, 0x002634) == chan->base.chid)
|
||||
break;
|
||||
) < 0) {
|
||||
nvkm_error(subdev, "channel %d [%s] kick timeout\n",
|
||||
chan->base.chid, chan->base.object.client->name);
|
||||
ret = -ETIMEDOUT;
|
||||
}
|
||||
mutex_unlock(&chan->fifo->base.mutex);
|
||||
|
||||
if (ret && suspend)
|
||||
return ret;
|
||||
|
||||
if (offset) {
|
||||
nvkm_kmap(inst);
|
||||
nvkm_wo32(inst, offset + 0x00, 0x00000000);
|
||||
|
@ -33,43 +33,6 @@
|
||||
#include <nvif/cla06f.h>
|
||||
#include <nvif/unpack.h>
|
||||
|
||||
/* Preempt (kick) a channel - or its whole TSG when it belongs to one -
 * off the PBDMA, then poll for completion.  Caller must already hold
 * fifo->base.mutex.  On timeout, channel recovery is triggered and
 * -ETIMEDOUT is returned; 0 on success.
 */
int
gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
{
	struct gk104_fifo *fifo = chan->fifo;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_client *client = chan->base.object.client;
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	int ret = 0;

	/* Bit 24 requests TSG-granularity preemption; otherwise preempt
	 * just this channel.
	 */
	if (cgrp)
		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
	else
		nvkm_wr32(device, 0x002634, chan->base.chid);
	/* Bit 20 reads back non-zero while the preempt is still pending;
	 * give the HW up to 2ms to complete it.
	 */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
			break;
	) < 0) {
		nvkm_error(subdev, "%s %d [%s] kick timeout\n",
			   cgrp ? "tsg" : "channel",
			   cgrp ? cgrp->id : chan->base.chid, client->name);
		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
		ret = -ETIMEDOUT;
	}
	return ret;
}
|
||||
|
||||
int
|
||||
gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
|
||||
{
|
||||
int ret;
|
||||
mutex_lock(&chan->base.fifo->mutex);
|
||||
ret = gk104_fifo_gpfifo_kick_locked(chan);
|
||||
mutex_unlock(&chan->base.fifo->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32
|
||||
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
|
||||
{
|
||||
@ -110,11 +73,6 @@ gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
|
||||
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
|
||||
struct nvkm_gpuobj *inst = chan->base.inst;
|
||||
u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
|
||||
int ret;
|
||||
|
||||
ret = gk104_fifo_gpfifo_kick(chan);
|
||||
if (ret && suspend)
|
||||
return ret;
|
||||
|
||||
if (offset) {
|
||||
nvkm_kmap(inst);
|
||||
@ -127,7 +85,7 @@ gk104_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
|
||||
nvkm_done(inst);
|
||||
}
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -202,7 +160,6 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
|
||||
|
||||
if (!list_empty(&chan->head)) {
|
||||
gk104_fifo_runlist_remove(fifo, chan);
|
||||
gk104_fifo_gpfifo_kick(chan);
|
||||
gk104_fifo_runlist_update(fifo, chan->runl);
|
||||
}
|
||||
}
|
||||
|
@ -33,23 +33,15 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
|
||||
{
|
||||
const u32 mask = ce ? 0x00020000 : 0x00010000;
|
||||
const u32 data = valid ? mask : 0x00000000;
|
||||
int ret;
|
||||
|
||||
/* Block runlist to prevent the channel from being rescheduled. */
|
||||
mutex_lock(&chan->fifo->base.mutex);
|
||||
|
||||
/* Preempt the channel. */
|
||||
ret = gk104_fifo_gpfifo_kick_locked(chan);
|
||||
if (ret == 0) {
|
||||
if (1) {
|
||||
/* Update engine context validity. */
|
||||
nvkm_kmap(chan->base.inst);
|
||||
nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
|
||||
nvkm_done(chan->base.inst);
|
||||
}
|
||||
|
||||
/* Resume runlist. */
|
||||
mutex_unlock(&chan->fifo->base.mutex);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -43,6 +43,7 @@ gv100_chan = {
|
||||
.unbind = gk104_chan_unbind,
|
||||
.start = gk104_chan_start,
|
||||
.stop = gk104_chan_stop,
|
||||
.preempt = gk110_chan_preempt,
|
||||
.doorbell_handle = gv100_chan_doorbell_handle,
|
||||
};
|
||||
|
||||
@ -99,6 +100,7 @@ gv100_runl = {
|
||||
.pending = gk104_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
const struct nvkm_enum
|
||||
|
@ -11,8 +11,6 @@ struct nvkm_runq;
|
||||
struct gk104_fifo;
|
||||
struct gk104_fifo_chan;
|
||||
|
||||
void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
|
||||
|
||||
struct nvkm_fifo_chan_oclass;
|
||||
struct nvkm_fifo_func {
|
||||
void *(*dtor)(struct nvkm_fifo *);
|
||||
@ -107,6 +105,7 @@ extern const struct nvkm_engn_func nv50_engn_sw;
|
||||
void nv50_chan_unbind(struct nvkm_chan *);
|
||||
void nv50_chan_start(struct nvkm_chan *);
|
||||
void nv50_chan_stop(struct nvkm_chan *);
|
||||
void nv50_chan_preempt(struct nvkm_chan *);
|
||||
|
||||
extern const struct nvkm_event_func g84_fifo_nonstall;
|
||||
extern const struct nvkm_engn_func g84_engn;
|
||||
@ -120,9 +119,11 @@ void gf100_fifo_intr_mmu_fault_unit(struct nvkm_fifo *, int);
|
||||
void gf100_fifo_mmu_fault_recover(struct nvkm_fifo *, struct nvkm_fault_data *);
|
||||
extern const struct nvkm_enum gf100_fifo_mmu_fault_access[];
|
||||
extern const struct nvkm_event_func gf100_fifo_nonstall;
|
||||
bool gf100_runl_preempt_pending(struct nvkm_runl *);
|
||||
void gf100_runq_init(struct nvkm_runq *);
|
||||
bool gf100_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
|
||||
extern const struct nvkm_engn_func gf100_engn_sw;
|
||||
void gf100_chan_preempt(struct nvkm_chan *);
|
||||
|
||||
int gk104_fifo_chid_nr(struct nvkm_fifo *);
|
||||
int gk104_fifo_runl_ctor(struct nvkm_fifo *);
|
||||
@ -156,6 +157,7 @@ int gk110_fifo_chid_ctor(struct nvkm_fifo *, int);
|
||||
extern const struct nvkm_runl_func gk110_runl;
|
||||
extern const struct nvkm_cgrp_func gk110_cgrp;
|
||||
extern const struct nvkm_chan_func gk110_chan;
|
||||
void gk110_chan_preempt(struct nvkm_chan *);
|
||||
|
||||
extern const struct nvkm_runq_func gk208_runq;
|
||||
void gk208_runq_init(struct nvkm_runq *);
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include "priv.h"
|
||||
|
||||
#include <core/gpuobj.h>
|
||||
#include <subdev/timer.h>
|
||||
#include <subdev/top.h>
|
||||
|
||||
struct nvkm_chan *
|
||||
@ -73,6 +74,17 @@ nvkm_runl_chan_get_chid(struct nvkm_runl *runl, int id, unsigned long *pirqflags
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Poll the runlist's preempt_pending() hook until a previously-issued
 * preempt completes, sleeping briefly between reads.  The overall
 * deadline comes from the fifo's per-channel timeout (chan_msec).
 * Returns 0 on completion, -ETIMEDOUT if the deadline expires.
 */
int
nvkm_runl_preempt_wait(struct nvkm_runl *runl)
{
	return nvkm_msec(runl->fifo->engine.subdev.device, runl->fifo->timeout.chan_msec,
		if (!runl->func->preempt_pending(runl))
			break;

		usleep_range(1, 2);
	) < 0 ? -ETIMEDOUT : 0;
}
|
||||
|
||||
bool
|
||||
nvkm_runl_update_pending(struct nvkm_runl *runl)
|
||||
{
|
||||
|
@ -28,6 +28,7 @@ struct nvkm_runl {
|
||||
bool (*pending)(struct nvkm_runl *);
|
||||
void (*block)(struct nvkm_runl *, u32 engm);
|
||||
void (*allow)(struct nvkm_runl *, u32 engm);
|
||||
bool (*preempt_pending)(struct nvkm_runl *);
|
||||
} *func;
|
||||
struct nvkm_fifo *fifo;
|
||||
int id;
|
||||
@ -60,6 +61,7 @@ void nvkm_runl_del(struct nvkm_runl *);
|
||||
void nvkm_runl_block(struct nvkm_runl *);
|
||||
void nvkm_runl_allow(struct nvkm_runl *);
|
||||
bool nvkm_runl_update_pending(struct nvkm_runl *);
|
||||
int nvkm_runl_preempt_wait(struct nvkm_runl *);
|
||||
|
||||
struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
|
||||
struct nvkm_chan *nvkm_runl_chan_get_inst(struct nvkm_runl *, u64 inst, unsigned long *irqflags);
|
||||
|
@ -53,6 +53,7 @@ tu102_chan = {
|
||||
.unbind = gk104_chan_unbind,
|
||||
.start = tu102_chan_start,
|
||||
.stop = gk104_chan_stop,
|
||||
.preempt = gk110_chan_preempt,
|
||||
.doorbell_handle = tu102_chan_doorbell_handle,
|
||||
};
|
||||
|
||||
@ -91,6 +92,7 @@ tu102_runl = {
|
||||
.pending = tu102_runl_pending,
|
||||
.block = gk104_runl_block,
|
||||
.allow = gk104_runl_allow,
|
||||
.preempt_pending = gf100_runl_preempt_pending,
|
||||
};
|
||||
|
||||
static const struct nvkm_enum
|
||||
|
@ -216,6 +216,7 @@ nvkm_uchan_fini(struct nvkm_object *object, bool suspend)
|
||||
int ret;
|
||||
|
||||
nvkm_chan_block(chan);
|
||||
nvkm_chan_preempt(chan, true);
|
||||
|
||||
ret = chan->object.func->fini(&chan->object, suspend);
|
||||
if (ret && suspend)
|
||||
|
Loading…
x
Reference in New Issue
Block a user