drm/nouveau/gr/gf100-: use new interfaces for vmm operations
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
commit 019e4d76c6
parent 01f349fcad
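
In short: the gr context builders (gf100 through gp102) stop carrying NV_MEM_ACCESS_* flags and instead record a single "bool priv" per buffer; channel setup then hands that flag to the VMM through a gf100_vmm_map_v0 argument, struct nvkm_vma members become pointers obtained with nvkm_vmm_get(), and the channel keeps its own VMM reference. Below is a condensed sketch of the new calling convention, assembled only from calls that appear in this diff; the example_* helpers are illustrative and not part of the patch.

/*
 * Condensed sketch only -- not part of the patch.  Assumes the gf100_gr_chan
 * layout introduced in gf100.h by this change; gf100_vmm_map_v0 comes from
 * <nvif/if900d.h>, which the patch adds to the includes.
 */
static int
example_map_mmio(struct gf100_gr_chan *chan, struct nvkm_fifo_chan *fifoch)
{
        struct gf100_vmm_map_v0 args = { .priv = 1 };
        int ret;

        /* The channel now holds its own VMM reference. */
        chan->vmm = nvkm_vmm_ref(fifoch->vmm);

        /* was: nvkm_vm_get(vmm, 0x1000, 12, NV_MEM_ACCESS_*, &vma) */
        ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
        if (ret)
                return ret;

        /*
         * was: nvkm_memory_map(mem, 0, vmm, &vma, NULL, 0); the privilege bit
         * now travels in the gf100_vmm_map_v0 argument instead of access flags.
         */
        return nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
                               chan->mmio_vma, &args, sizeof(args));
}

static void
example_unmap_mmio(struct gf100_gr_chan *chan)
{
        /* was: vma.node check + nvkm_vm_unmap() + nvkm_vm_put() */
        nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
        nvkm_memory_unref(&chan->mmio);
        nvkm_vmm_unref(&chan->vmm);
}
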
@@ -986,14 +986,14 @@ gf100_grctx_pack_tpc[] = {
 ******************************************************************************/
 
 int
-gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, u32 access)
+gf100_grctx_mmio_data(struct gf100_grctx *info, u32 size, u32 align, bool priv)
 {
         if (info->data) {
                 info->buffer[info->buffer_nr] = round_up(info->addr, align);
                 info->addr = info->buffer[info->buffer_nr] + size;
                 info->data->size = size;
                 info->data->align = align;
-                info->data->access = access;
+                info->data->priv = priv;
                 info->data++;
                 return info->buffer_nr++;
         }
@@ -1028,9 +1028,8 @@ void
 gf100_grctx_generate_bundle(struct gf100_grctx *info)
 {
         const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
         mmio_refn(info, 0x408004, 0x00000000, s, b);
         mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
         mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -1041,9 +1040,8 @@ void
 gf100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
         const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
         mmio_refn(info, 0x40800c, 0x00000000, s, b);
         mmio_wr32(info, 0x408010, 0x80000000);
         mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -1057,9 +1055,8 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
         const struct gf100_grctx_func *grctx = gr->func->grctx;
         const u32 attrib = grctx->attrib_nr;
         const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
         int gpc, tpc;
         u32 bo = 0;
 
@@ -11,7 +11,7 @@ struct gf100_grctx {
         u64 addr;
 };
 
-int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, u32 access);
+int gf100_grctx_mmio_data(struct gf100_grctx *, u32 size, u32 align, bool priv);
 void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int);
 
 #define mmio_vram(a,b,c,d) gf100_grctx_mmio_data((a), (b), (c), (d))
@@ -735,9 +735,8 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
         const u32 alpha = grctx->alpha_nr;
         const u32 beta = grctx->attrib_nr;
         const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
         const int timeslice_mode = 1;
         const int max_batches = 0xffff;
         u32 bo = 0;
@@ -187,9 +187,8 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
         const u32 alpha = grctx->alpha_nr;
         const u32 beta = grctx->attrib_nr;
         const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
         const int timeslice_mode = 1;
         const int max_batches = 0xffff;
         u32 bo = 0;
@@ -847,9 +847,8 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info)
         const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
                                     grctx->bundle_size / 0x20);
         const u32 token_limit = grctx->bundle_token_limit;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
         mmio_refn(info, 0x408004, 0x00000000, s, b);
         mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
         mmio_refn(info, 0x418808, 0x00000000, s, b);
@@ -861,9 +860,8 @@ void
 gk104_grctx_generate_pagepool(struct gf100_grctx *info)
 {
         const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
         mmio_refn(info, 0x40800c, 0x00000000, s, b);
         mmio_wr32(info, 0x408010, 0x80000000);
         mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -867,9 +867,8 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info)
         const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
                                     grctx->bundle_size / 0x20);
         const u32 token_limit = grctx->bundle_token_limit;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true);
         mmio_refn(info, 0x408004, 0x00000000, s, b);
         mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
         mmio_refn(info, 0x418e24, 0x00000000, s, b);
@@ -881,9 +880,8 @@ void
 gm107_grctx_generate_pagepool(struct gf100_grctx *info)
 {
         const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
         mmio_refn(info, 0x40800c, 0x00000000, s, b);
         mmio_wr32(info, 0x408010, 0x80000000);
         mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -900,9 +898,8 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
         const u32 alpha = grctx->alpha_nr;
         const u32 attrib = grctx->attrib_nr;
         const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
+        const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), false);
         const int max_batches = 0xffff;
         u32 bo = 0;
         u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
@@ -33,9 +33,8 @@ void
 gp100_grctx_generate_pagepool(struct gf100_grctx *info)
 {
         const struct gf100_grctx_func *grctx = info->gr->func->grctx;
-        const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
         const int s = 8;
-        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
+        const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), true);
         mmio_refn(info, 0x40800c, 0x00000000, s, b);
         mmio_wr32(info, 0x408010, 0x80000000);
         mmio_refn(info, 0x419004, 0x00000000, s, b);
@@ -51,9 +50,8 @@ gp100_grctx_generate_attrib(struct gf100_grctx *info)
         const u32 attrib = grctx->attrib_nr;
         const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
         const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size, (1 << s), access);
+        const int b = mmio_vram(info, size, (1 << s), false);
         const int max_batches = 0xffff;
         u32 ao = 0;
         u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
@@ -38,9 +38,8 @@ gp102_grctx_generate_attrib(struct gf100_grctx *info)
         const u32 attrib = grctx->attrib_nr;
         const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
         const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
-        const u32 access = NV_MEM_ACCESS_RW;
         const int s = 12;
-        const int b = mmio_vram(info, size, (1 << s), access);
+        const int b = mmio_vram(info, size, (1 << s), false);
         const int max_batches = 0xffff;
         u32 ao = 0;
         u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
@@ -37,6 +37,7 @@
 
 #include <nvif/class.h>
 #include <nvif/cl9097.h>
+#include <nvif/if900d.h>
 #include <nvif/unpack.h>
 
 /*******************************************************************************
@@ -327,13 +328,13 @@ gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
 
         if (!gr->firmware) {
                 nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
-                nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
+                nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma->addr >> 8);
         } else {
                 nvkm_wo32(*pgpuobj, 0xf4, 0);
                 nvkm_wo32(*pgpuobj, 0xf8, 0);
                 nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
-                nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
-                nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
+                nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma->addr));
+                nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma->addr));
                 nvkm_wo32(*pgpuobj, 0x1c, 1);
                 nvkm_wo32(*pgpuobj, 0x20, 0);
                 nvkm_wo32(*pgpuobj, 0x28, 0);
@@ -350,18 +351,13 @@ gf100_gr_chan_dtor(struct nvkm_object *object)
         int i;
 
         for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
-                if (chan->data[i].vma.node) {
-                        nvkm_vm_unmap(&chan->data[i].vma);
-                        nvkm_vm_put(&chan->data[i].vma);
-                }
+                nvkm_vmm_put(chan->vmm, &chan->data[i].vma);
                 nvkm_memory_unref(&chan->data[i].mem);
         }
 
-        if (chan->mmio_vma.node) {
-                nvkm_vm_unmap(&chan->mmio_vma);
-                nvkm_vm_put(&chan->mmio_vma);
-        }
+        nvkm_vmm_put(chan->vmm, &chan->mmio_vma);
         nvkm_memory_unref(&chan->mmio);
+        nvkm_vmm_unref(&chan->vmm);
         return chan;
 }
 
@@ -380,6 +376,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
         struct gf100_gr_data *data = gr->mmio_data;
         struct gf100_gr_mmio *mmio = gr->mmio_list;
         struct gf100_gr_chan *chan;
+        struct gf100_vmm_map_v0 args = { .priv = 1 };
         struct nvkm_device *device = gr->base.engine.subdev.device;
         int ret, i;
 
@@ -387,6 +384,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
                 return -ENOMEM;
         nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
         chan->gr = gr;
+        chan->vmm = nvkm_vmm_ref(fifoch->vmm);
         *pobject = &chan->object;
 
         /* allocate memory for a "mmio list" buffer that's used by the HUB
@@ -398,13 +396,12 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
         if (ret)
                 return ret;
 
-        ret = nvkm_vm_get(fifoch->vmm, 0x1000, 12, NV_MEM_ACCESS_RW |
-                          NV_MEM_ACCESS_SYS, &chan->mmio_vma);
+        ret = nvkm_vmm_get(fifoch->vmm, 12, 0x1000, &chan->mmio_vma);
         if (ret)
                 return ret;
 
         ret = nvkm_memory_map(chan->mmio, 0, fifoch->vmm,
-                              &chan->mmio_vma, NULL, 0);
+                              chan->mmio_vma, &args, sizeof(args));
         if (ret)
                 return ret;
 
@@ -416,14 +413,16 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
                 if (ret)
                         return ret;
 
-                ret = nvkm_vm_get(fifoch->vmm,
-                                  nvkm_memory_size(chan->data[i].mem), 12,
-                                  data->access, &chan->data[i].vma);
+                ret = nvkm_vmm_get(fifoch->vmm, 12,
+                                   nvkm_memory_size(chan->data[i].mem),
+                                   &chan->data[i].vma);
                 if (ret)
                         return ret;
 
-                ret = nvkm_memory_map(chan->data[i].mem, 0, fifoch->vmm,
-                                      &chan->data[i].vma, NULL, 0);
+                args.priv = data->priv;
+
+                ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
+                                      chan->data[i].vma, &args, sizeof(args));
                 if (ret)
                         return ret;
 
@@ -437,7 +436,7 @@ gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
                 u32 data = mmio->data;
 
                 if (mmio->buffer >= 0) {
-                        u64 info = chan->data[mmio->buffer].vma.offset;
+                        u64 info = chan->data[mmio->buffer].vma->addr;
                         data |= info >> mmio->shift;
                 }
 
@@ -45,7 +45,7 @@
 struct gf100_gr_data {
         u32 size;
         u32 align;
-        u32 access;
+        bool priv;
 };
 
 struct gf100_gr_mmio {
@@ -161,14 +161,15 @@ void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
 struct gf100_gr_chan {
         struct nvkm_object object;
         struct gf100_gr *gr;
+        struct nvkm_vmm *vmm;
 
         struct nvkm_memory *mmio;
-        struct nvkm_vma mmio_vma;
+        struct nvkm_vma *mmio_vma;
         int mmio_nr;
 
         struct {
                 struct nvkm_memory *mem;
-                struct nvkm_vma vma;
+                struct nvkm_vma *vma;
         } data[4];
 };
 
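
Taken together, the hunks route the privilege bit end to end: the context builders pass true (bundle, pagepool) or false (attrib) to mmio_vram(), gf100_grctx_mmio_data() records it in gf100_gr_data.priv, and gf100_gr_chan_new() copies it into the per-buffer map arguments. Paraphrased from the lines above, not literal patch content:

        /* grctx side: each buffer records whether it needs a privileged mapping */
        const int b = mmio_vram(info, grctx->bundle_size, (1 << s), true); /* -> info->data->priv */

        /* channel side: the recorded flag becomes the map argument */
        args.priv = data->priv;
        ret = nvkm_memory_map(chan->data[i].mem, 0, chan->vmm,
                              chan->data[i].vma, &args, sizeof(args));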