drm/nouveau/gr: convert user classes to new-style nvkm_object

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2015-08-20 14:54:19 +10:00
parent b3c9815047
commit 27f3d6cf80
55 changed files with 1767 additions and 1745 deletions
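In short, this conversion replaces the per-chipset graphics context "oclass" objects (gf100_grctx_oclass and friends, each wrapping an nvkm_oclass header plus an ofuncs table) with plain const parameter/function tables (gf100_grctx_func), moves the user class lists into gf100_gr_func::sclass, and turns channel contexts into ordinary new-style objects: gf100_gr_chan embeds a struct nvkm_object initialised with nvkm_object_ctor() against a const nvkm_object_func, and the containing structure is recovered with container_of(). The standalone sketch below illustrates that shape only; the demo_* names are simplified stand-ins, not the nvkm interfaces from the diff.

```c
/* Standalone illustration of the "new-style" object shape this commit
 * converts the GR user classes to.  All demo_* names are simplified
 * stand-ins for the nvkm structures; only the overall pattern -- a const
 * func table, an embedded base object, and container_of() recovery --
 * mirrors the diff. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_object;

struct demo_object_func {		/* cf. nvkm_object_func */
	void (*dtor)(struct demo_object *);
};

struct demo_object {			/* cf. nvkm_object */
	const struct demo_object_func *func;
};

static void
demo_object_ctor(const struct demo_object_func *func, struct demo_object *object)
{
	object->func = func;
}

struct demo_grctx_func {		/* cf. gf100_grctx_func */
	unsigned int attrib_nr_max;
	unsigned int attrib_nr;
};

struct demo_gr_chan {			/* cf. gf100_gr_chan */
	struct demo_object object;	/* embedded base, not a pointer */
	const struct demo_grctx_func *grctx;
};
#define demo_gr_chan(p) container_of((p), struct demo_gr_chan, object)

static void
demo_gr_chan_dtor(struct demo_object *object)
{
	struct demo_gr_chan *chan = demo_gr_chan(object);
	printf("tearing down chan, attrib_nr_max=0x%x\n",
	       chan->grctx->attrib_nr_max);
}

static const struct demo_object_func
demo_gr_chan_func = {
	.dtor = demo_gr_chan_dtor,
};

int
main(void)
{
	static const struct demo_grctx_func demo_grctx = {
		.attrib_nr_max = 0x324,	/* parameters live in the table now */
		.attrib_nr = 0x218,
	};
	struct demo_gr_chan chan = { .grctx = &demo_grctx };

	demo_object_ctor(&demo_gr_chan_func, &chan.object);
	chan.object.func->dtor(&chan.object);	/* generic code sees only the base */
	return 0;
}
```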

View File

@ -65,5 +65,5 @@ u64 nvif_device_time(struct nvif_device *);
#include <engine/sw.h>
#define nvxx_fifo(a) nvxx_device(a)->fifo
#define nvxx_gr(a) nvkm_gr(nvxx_device(a))
#define nvxx_gr(a) nvxx_device(a)->gr
#endif

View File

@ -30,7 +30,6 @@ int nvkm_client_new(const char *name, u64 device, const char *cfg,
void nvkm_client_del(struct nvkm_client **);
int nvkm_client_init(struct nvkm_client *);
int nvkm_client_fini(struct nvkm_client *, bool suspend);
const char *nvkm_client_name(void *obj);
static inline struct nvkm_client *
nvkm_client(struct nvkm_object *object)

View File

@ -1,44 +1,22 @@
#ifndef __NVKM_GR_H__
#define __NVKM_GR_H__
#include <core/engctx.h>
struct nvkm_gr_chan {
struct nvkm_engctx base;
};
#define nvkm_gr_context_create(p,e,c,g,s,a,f,d) \
nvkm_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
#define nvkm_gr_context_destroy(d) \
nvkm_engctx_destroy(&(d)->base)
#define nvkm_gr_context_init(d) \
nvkm_engctx_init(&(d)->base)
#define nvkm_gr_context_fini(d,s) \
nvkm_engctx_fini(&(d)->base, (s))
#define _nvkm_gr_context_dtor _nvkm_engctx_dtor
#define _nvkm_gr_context_init _nvkm_engctx_init
#define _nvkm_gr_context_fini _nvkm_engctx_fini
#define _nvkm_gr_context_rd32 _nvkm_engctx_rd32
#define _nvkm_gr_context_wr32 _nvkm_engctx_wr32
#include <core/engine.h>
struct nvkm_gr {
struct nvkm_engine engine;
const struct nvkm_gr_func *func;
/* Returns chipset-specific counts of units packed into a u64.
*/
u64 (*units)(struct nvkm_gr *);
};
static inline struct nvkm_gr *
nvkm_gr(void *obj)
{
return (void *)nvkm_engine(obj, NVDEV_ENGINE_GR);
}
#define nvkm_gr_create(p,e,c,y,d) \
nvkm_engine_create((p), (e), (c), (y), "PGRAPH", "graphics", (d))
nvkm_gr_create_((p), (e), (c), (y), sizeof(**d), (void **)(d))
int
nvkm_gr_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, bool enable,
int length, void **pobject);
#define nvkm_gr_destroy(d) \
nvkm_engine_destroy(&(d)->engine)
#define nvkm_gr_init(d) \
@ -79,8 +57,7 @@ extern struct nvkm_oclass *gm20b_gr_oclass;
#include <core/enum.h>
extern const struct nvkm_bitfield nv04_gr_nsource[];
extern struct nvkm_ofuncs nv04_gr_ofuncs;
bool nv04_gr_idle(void *obj);
bool nv04_gr_idle(struct nvkm_gr *);
extern const struct nvkm_bitfield nv10_gr_intr_name[];
extern const struct nvkm_bitfield nv10_gr_nstatus[];

View File

@ -321,13 +321,3 @@ nvkm_client_new(const char *name, u64 device, const char *cfg,
nvkm_client_del(pclient);
return ret;
}
const char *
nvkm_client_name(void *obj)
{
const char *client_name = "unknown";
struct nvkm_client *client = nvkm_client(obj);
if (client)
client_name = client->name;
return client_name;
}

View File

@ -1,3 +1,4 @@
nvkm-y += nvkm/engine/gr/base.o
nvkm-y += nvkm/engine/gr/nv04.o
nvkm-y += nvkm/engine/gr/nv10.o
nvkm-y += nvkm/engine/gr/nv20.o

View File

@ -0,0 +1,84 @@
/*
* Copyright 2015 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include "priv.h"
#include <engine/fifo.h>
static int
nvkm_gr_oclass_get(struct nvkm_oclass *oclass, int index)
{
struct nvkm_gr *gr = nvkm_gr(oclass->engine);
int c = 0;
if (gr->func->object_get) {
int ret = gr->func->object_get(gr, index, &oclass->base);
if (oclass->base.oclass)
return index;
return ret;
}
while (gr->func->sclass[c].oclass) {
if (c++ == index) {
oclass->base = gr->func->sclass[index];
return index;
}
}
return c;
}
static int
nvkm_gr_cclass_new(struct nvkm_fifo_chan *chan,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct nvkm_gr *gr = nvkm_gr(oclass->engine);
if (gr->func->chan_new)
return gr->func->chan_new(gr, chan, oclass, pobject);
return 0;
}
struct nvkm_engine_func
nvkm_gr = {
.fifo.cclass = nvkm_gr_cclass_new,
.fifo.sclass = nvkm_gr_oclass_get,
};
int
nvkm_gr_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, bool enable,
int length, void **pobject)
{
struct nvkm_gr *gr;
int ret;
ret = nvkm_engine_create_(parent, engine, oclass, enable,
"gr", "gr", length, pobject);
gr = *pobject;
if (ret)
return ret;
gr->engine.func = &nvkm_gr;
return 0;
}
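Worth noting from nvkm_gr_oclass_get() above: whether it defers to a chipset object_get hook or walks the static sclass list, the convention is the same — return the index when it resolves to a user class, otherwise return the number of classes seen. The standalone sketch below shows that index-or-count convention and one way a caller can check for a hit; the demo_* names are simplified stand-ins rather than the nvkm types, and the hex IDs are the usual Fermi class numbers behind FERMI_TWOD_A and friends.

```c
/* Sketch of the index-or-count class enumeration convention used by
 * nvkm_gr_oclass_get()/gf100_gr_object_get() in this commit. */
#include <stdio.h>

struct demo_sclass {
	int oclass;			/* 0 terminates the list */
};

static const struct demo_sclass
demo_sclass_list[] = {
	{ 0x902d },	/* cf. FERMI_TWOD_A */
	{ 0x9039 },	/* cf. FERMI_MEMORY_TO_MEMORY_FORMAT_A */
	{ 0x9097 },	/* cf. FERMI_A */
	{ 0x90c0 },	/* cf. FERMI_COMPUTE_A */
	{}
};

static int
demo_oclass_get(int index, struct demo_sclass *sclass)
{
	int c = 0;
	while (demo_sclass_list[c].oclass) {
		if (c++ == index) {
			*sclass = demo_sclass_list[index];
			return index;
		}
	}
	return c;	/* index out of range: report how many classes exist */
}

int
main(void)
{
	struct demo_sclass sclass;
	int i;

	for (i = 0; ; i++) {
		sclass.oclass = 0;
		/* a hit returns the index *and* fills in the class; checking
		 * both mirrors the object_get path in nvkm_gr_oclass_get() */
		if (demo_oclass_get(i, &sclass) != i || !sclass.oclass)
			break;
		printf("class %d: 0x%04x\n", i, sclass.oclass);
	}
	printf("%d classes total\n", demo_oclass_get(i, &sclass));
	return 0;
}
```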

View File

@ -1027,23 +1027,23 @@ gf100_grctx_mmio_item(struct gf100_grctx *info, u32 addr, u32 data,
void
gf100_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
}
void
gf100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@ -1054,9 +1054,9 @@ void
gf100_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
@ -1072,7 +1072,7 @@ gf100_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = TPC_UNIT(gpc, tpc, 0x0520);
mmio_skip(info, o, (attrib << 16) | ++bo);
mmio_wr32(info, o, (attrib << 16) | --bo);
bo += impl->attrib_nr_max;
bo += grctx->attrib_nr_max;
}
}
}
@ -1237,22 +1237,22 @@ void
gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -1260,16 +1260,16 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_grctx_generate_r418bb8(gr);
gf100_grctx_generate_r406800(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
}
int
gf100_grctx_generate(struct gf100_gr *gr)
{
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_memory *chan;
@ -1352,7 +1352,7 @@ gf100_grctx_generate(struct gf100_gr *gr)
);
}
oclass->main(gr, &info);
grctx->main(gr, &info);
/* trigger a context unload by unsetting the "next channel valid" bit
* and faking a context switch interrupt
@ -1383,17 +1383,8 @@ done:
return ret;
}
struct nvkm_oclass *
gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc0),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf100_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@ -1409,4 +1400,4 @@ gf100_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};

View File

@ -19,8 +19,7 @@ void gf100_grctx_mmio_item(struct gf100_grctx *, u32 addr, u32 data, int s, int)
#define mmio_skip(a,b,c) mmio_refn((a), (b), (c), -1, -1)
#define mmio_wr32(a,b,c) mmio_refn((a), (b), (c), 0, -1)
struct gf100_grctx_oclass {
struct nvkm_oclass base;
struct gf100_grctx_func {
/* main context generation function */
void (*main)(struct gf100_gr *, struct gf100_grctx *);
/* context-specific modify-on-first-load list generation function */
@ -50,13 +49,7 @@ struct gf100_grctx_oclass {
u32 alpha_nr;
};
static inline const struct gf100_grctx_oclass *
gf100_grctx_impl(struct gf100_gr *gr)
{
return (void *)nv_engine(gr)->cclass;
}
extern struct nvkm_oclass *gf100_grctx_oclass;
extern const struct gf100_grctx_func gf100_grctx;
int gf100_grctx_generate(struct gf100_gr *);
void gf100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gf100_grctx_generate_bundle(struct gf100_grctx *);
@ -69,20 +62,20 @@ void gf100_grctx_generate_r4060a8(struct gf100_gr *);
void gf100_grctx_generate_r418bb8(struct gf100_gr *);
void gf100_grctx_generate_r406800(struct gf100_gr *);
extern struct nvkm_oclass *gf108_grctx_oclass;
extern const struct gf100_grctx_func gf108_grctx;
void gf108_grctx_generate_attrib(struct gf100_grctx *);
void gf108_grctx_generate_unkn(struct gf100_gr *);
extern struct nvkm_oclass *gf104_grctx_oclass;
extern struct nvkm_oclass *gf110_grctx_oclass;
extern const struct gf100_grctx_func gf104_grctx;
extern const struct gf100_grctx_func gf110_grctx;
extern struct nvkm_oclass *gf117_grctx_oclass;
extern const struct gf100_grctx_func gf117_grctx;
void gf117_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gf119_grctx_oclass;
extern const struct gf100_grctx_func gf119_grctx;
extern struct nvkm_oclass *gk104_grctx_oclass;
extern struct nvkm_oclass *gk20a_grctx_oclass;
extern const struct gf100_grctx_func gk104_grctx;
extern const struct gf100_grctx_func gk20a_grctx;
void gk104_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gk104_grctx_generate_bundle(struct gf100_grctx *);
void gk104_grctx_generate_pagepool(struct gf100_grctx *);
@ -95,22 +88,22 @@ void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gk110_grctx_oclass;
extern struct nvkm_oclass *gk110b_grctx_oclass;
extern struct nvkm_oclass *gk208_grctx_oclass;
extern const struct gf100_grctx_func gk110_grctx;
extern const struct gf100_grctx_func gk110b_grctx;
extern const struct gf100_grctx_func gk208_grctx;
extern struct nvkm_oclass *gm107_grctx_oclass;
extern const struct gf100_grctx_func gm107_grctx;
void gm107_grctx_generate_bundle(struct gf100_grctx *);
void gm107_grctx_generate_pagepool(struct gf100_grctx *);
void gm107_grctx_generate_attrib(struct gf100_grctx *);
extern struct nvkm_oclass *gm204_grctx_oclass;
extern const struct gf100_grctx_func gm204_grctx;
void gm204_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gm204_grctx_generate_tpcid(struct gf100_gr *);
void gm204_grctx_generate_405b60(struct gf100_gr *);
extern struct nvkm_oclass *gm206_grctx_oclass;
extern struct nvkm_oclass *gm20b_grctx_oclass;
extern const struct gf100_grctx_func gm206_grctx;
extern const struct gf100_grctx_func gm20b_grctx;
/* context init value lists */

View File

@ -79,17 +79,8 @@ gf104_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc3),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf104_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@ -105,4 +96,4 @@ gf104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};

View File

@ -731,17 +731,17 @@ void
gf108_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, tpc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@ -757,9 +757,9 @@ gf108_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = TPC_UNIT(gpc, tpc, 0x500);
mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
bo += impl->attrib_nr_max;
bo += grctx->attrib_nr_max;
mmio_wr32(info, o + 0x44, (a << 16) | ao);
ao += impl->alpha_nr_max;
ao += grctx->alpha_nr_max;
}
}
}
@ -776,17 +776,8 @@ gf108_grctx_generate_unkn(struct gf100_gr *gr)
nvkm_mask(device, 0x419c00, 0x00000008, 0x00000008);
}
struct nvkm_oclass *
gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc1),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf108_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf108_grctx_pack_hub,
@ -804,4 +795,4 @@ gf108_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
}.base;
};

View File

@ -330,17 +330,8 @@ gf110_grctx_pack_gpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xc8),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf110_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf100_grctx_generate_unkn,
.hub = gf100_grctx_pack_hub,
@ -356,4 +347,4 @@ gf110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib = gf100_grctx_generate_attrib,
.attrib_nr_max = 0x324,
.attrib_nr = 0x218,
}.base;
};

View File

@ -183,17 +183,17 @@ void
gf117_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 beta = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 beta = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int timeslice_mode = 1;
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@ -209,9 +209,9 @@ gf117_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
bo += impl->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, (a << 16) | ao);
ao += impl->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
}
}
}
@ -220,23 +220,23 @@ void
gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -247,23 +247,14 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
for (i = 0; i < 8; i++)
nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
}
struct nvkm_oclass *
gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd7),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf117_grctx = {
.main = gf117_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gf117_grctx_pack_hub,
@ -282,4 +273,4 @@ gf117_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x324,
}.base;
};

View File

@ -498,17 +498,8 @@ gf119_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xd9),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gf119_grctx = {
.main = gf100_grctx_generate_main,
.unkn = gf108_grctx_generate_unkn,
.hub = gf119_grctx_pack_hub,
@ -526,4 +517,4 @@ gf119_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x324,
.alpha_nr = 0x218,
}.base;
};

View File

@ -843,27 +843,27 @@ gk104_grctx_pack_ppc[] = {
void
gk104_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418808, 0x00000000, s, b);
mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x41880c, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gk104_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@ -955,23 +955,23 @@ void
gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
nvkm_mc(gr)->unk260(nvkm_mc(gr), 0);
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -985,26 +985,17 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gk104_grctx_generate_rop_active_fbps(gr);
nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
}
struct nvkm_oclass *
gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xe4),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk104_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk104_grctx_pack_hub,
@ -1025,4 +1016,4 @@ gk104_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};

View File

@ -808,17 +808,8 @@ gk110_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf0),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk110_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
@ -839,4 +830,4 @@ gk110_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};

View File

@ -69,17 +69,8 @@ gk110b_grctx_pack_tpc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xf1),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk110b_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk110_grctx_pack_hub,
@ -100,4 +91,4 @@ gk110b_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};

View File

@ -530,17 +530,8 @@ gk208_grctx_pack_ppc[] = {
* PGRAPH context implementation
******************************************************************************/
struct nvkm_oclass *
gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk208_grctx = {
.main = gk104_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gk208_grctx_pack_hub,
@ -561,4 +552,4 @@ gk208_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x218,
.alpha_nr_max = 0x7ff,
.alpha_nr = 0x648,
}.base;
};

View File

@ -29,7 +29,7 @@ static void
gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int idle_timeout_save;
int i;
@ -40,9 +40,9 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
idle_timeout_save = nvkm_rd32(device, 0x404154);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->attrib(info);
grctx->attrib(info);
oclass->unkn(gr);
grctx->unkn(gr);
gf100_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -67,21 +67,12 @@ gk20a_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
gf100_gr_icmd(gr, gr->fuc_bundle);
oclass->pagepool(info);
oclass->bundle(info);
grctx->pagepool(info);
grctx->bundle(info);
}
struct nvkm_oclass *
gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0xea),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gk20a_grctx = {
.main = gk20a_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gk104_grctx_generate_bundle,
@ -95,4 +86,4 @@ gk20a_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x240,
.alpha_nr_max = 0x648 + (0x648 / 2),
.alpha_nr = 0x648,
}.base;
};

View File

@ -863,27 +863,27 @@ gm107_grctx_pack_ppc[] = {
void
gm107_grctx_generate_bundle(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const u32 state_limit = min(impl->bundle_min_gpm_fifo_depth,
impl->bundle_size / 0x20);
const u32 token_limit = impl->bundle_token_limit;
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 state_limit = min(grctx->bundle_min_gpm_fifo_depth,
grctx->bundle_size / 0x20);
const u32 token_limit = grctx->bundle_token_limit;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->bundle_size, (1 << s), access);
const int b = mmio_vram(info, grctx->bundle_size, (1 << s), access);
mmio_refn(info, 0x408004, 0x00000000, s, b);
mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x408008, 0x80000000 | (grctx->bundle_size >> s));
mmio_refn(info, 0x418e24, 0x00000000, s, b);
mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s));
mmio_wr32(info, 0x418e28, 0x80000000 | (grctx->bundle_size >> s));
mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit);
}
void
gm107_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_oclass *impl = gf100_grctx_impl(info->gr);
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
const int s = 8;
const int b = mmio_vram(info, impl->pagepool_size, (1 << s), access);
const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
mmio_refn(info, 0x40800c, 0x00000000, s, b);
mmio_wr32(info, 0x408010, 0x80000000);
mmio_refn(info, 0x419004, 0x00000000, s, b);
@ -896,16 +896,16 @@ void
gm107_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
const struct gf100_grctx_oclass *impl = (void *)gf100_grctx_impl(gr);
const u32 alpha = impl->alpha_nr;
const u32 attrib = impl->attrib_nr;
const u32 size = 0x20 * (impl->attrib_nr_max + impl->alpha_nr_max);
const struct gf100_grctx_func *grctx = gr->func->grctx;
const u32 alpha = grctx->alpha_nr;
const u32 attrib = grctx->attrib_nr;
const u32 size = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
const u32 access = NV_MEM_ACCESS_RW;
const int s = 12;
const int b = mmio_vram(info, size * gr->tpc_total, (1 << s), access);
const int max_batches = 0xffff;
u32 bo = 0;
u32 ao = bo + impl->attrib_nr_max * gr->tpc_total;
u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
int gpc, ppc, n = 0;
mmio_refn(info, 0x418810, 0x80000000, s, b);
@ -922,10 +922,10 @@ gm107_grctx_generate_attrib(struct gf100_grctx *info)
const u32 o = PPC_UNIT(gpc, ppc, 0);
mmio_wr32(info, o + 0xc0, bs);
mmio_wr32(info, o + 0xf4, bo);
bo += impl->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, o + 0xe4, as);
mmio_wr32(info, o + 0xf8, ao);
ao += impl->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
mmio_wr32(info, u, ((bs / 3 /*XXX*/) << 16) | bs);
}
}
@ -956,21 +956,21 @@ static void
gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int i;
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gm107_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -986,9 +986,9 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gk104_grctx_generate_rop_active_fbps(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000400);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mask(device, 0x419e00, 0x00808080, 0x00808080);
nvkm_mask(device, 0x419ccc, 0x80000000, 0x80000000);
@ -996,17 +996,8 @@ gm107_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
nvkm_mask(device, 0x419f88, 0x80000000, 0x80000000);
}
struct nvkm_oclass *
gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x08),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm107_grctx = {
.main = gm107_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm107_grctx_pack_hub,
@ -1027,4 +1018,4 @@ gm107_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0xaa0,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};

View File

@ -981,22 +981,22 @@ void
gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
u32 tmp;
int i;
gf100_gr_mmio(gr, oclass->hub);
gf100_gr_mmio(gr, oclass->gpc);
gf100_gr_mmio(gr, oclass->zcull);
gf100_gr_mmio(gr, oclass->tpc);
gf100_gr_mmio(gr, oclass->ppc);
gf100_gr_mmio(gr, grctx->hub);
gf100_gr_mmio(gr, grctx->gpc);
gf100_gr_mmio(gr, grctx->zcull);
gf100_gr_mmio(gr, grctx->tpc);
gf100_gr_mmio(gr, grctx->ppc);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->bundle(info);
oclass->pagepool(info);
oclass->attrib(info);
oclass->unkn(gr);
grctx->bundle(info);
grctx->pagepool(info);
grctx->attrib(info);
grctx->unkn(gr);
gm204_grctx_generate_tpcid(gr);
gf100_grctx_generate_r406028(gr);
@ -1016,25 +1016,16 @@ gm204_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gm204_grctx_generate_405b60(gr);
gf100_gr_icmd(gr, oclass->icmd);
gf100_gr_icmd(gr, grctx->icmd);
nvkm_wr32(device, 0x404154, 0x00000800);
gf100_gr_mthd(gr, oclass->mthd);
gf100_gr_mthd(gr, grctx->mthd);
nvkm_mask(device, 0x418e94, 0xffffffff, 0xc4230000);
nvkm_mask(device, 0x418e4c, 0xffffffff, 0x70000000);
}
struct nvkm_oclass *
gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x24),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm204_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
@ -1055,4 +1046,4 @@ gm204_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};

View File

@ -49,17 +49,8 @@ gm206_grctx_pack_gpc[] = {
{}
};
struct nvkm_oclass *
gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x26),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm206_grctx = {
.main = gm204_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.hub = gm204_grctx_pack_hub,
@ -80,4 +71,4 @@ gm206_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0x1800,
.alpha_nr = 0x1000,
}.base;
};

View File

@ -39,7 +39,7 @@ static void
gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
struct gf100_grctx_oclass *oclass = (void *)nv_engine(gr)->cclass;
const struct gf100_grctx_func *grctx = gr->func->grctx;
int idle_timeout_save;
int i, tmp;
@ -50,9 +50,9 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
idle_timeout_save = nvkm_rd32(device, 0x404154);
nvkm_wr32(device, 0x404154, 0x00000000);
oclass->attrib(info);
grctx->attrib(info);
oclass->unkn(gr);
grctx->unkn(gr);
gm204_grctx_generate_tpcid(gr);
gm20b_grctx_generate_r406028(gr);
@ -81,21 +81,12 @@ gm20b_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
gf100_gr_wait_idle(gr);
gf100_gr_icmd(gr, gr->fuc_bundle);
oclass->pagepool(info);
oclass->bundle(info);
grctx->pagepool(info);
grctx->bundle(info);
}
struct nvkm_oclass *
gm20b_grctx_oclass = &(struct gf100_grctx_oclass) {
.base.handle = NV_ENGCTX(GR, 0x2b),
.base.ofuncs = &(struct nvkm_ofuncs) {
.ctor = gf100_gr_context_ctor,
.dtor = gf100_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
const struct gf100_grctx_func
gm20b_grctx = {
.main = gm20b_grctx_generate_main,
.unkn = gk104_grctx_generate_unkn,
.bundle = gm107_grctx_generate_bundle,
@ -109,4 +100,4 @@ gm20b_grctx_oclass = &(struct gf100_grctx_oclass) {
.attrib_nr = 0x400,
.alpha_nr_max = 0xc00,
.alpha_nr = 0x800,
}.base;
};

View File

@ -580,7 +580,6 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
if (ctx->mode != NVKM_GRCTX_VALS)
return;
nvkm_kmap(obj);
offset += 0x0280/4;
for (i = 0; i < 16; i++, offset += 2)
nvkm_wo32(obj, offset * 4, 0x3f800000);
@ -591,7 +590,6 @@ nv40_gr_construct_shader(struct nvkm_grctx *ctx)
for (i = 0; i < vs_nr_b1 * 4; i += 4)
nvkm_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
}
nvkm_done(obj);
}
static void

View File

@ -125,8 +125,6 @@ gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val)
reg = (reg - 0x00400000) / 4;
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
nvkm_kmap(ctx->data);
nvkm_wo32(ctx->data, reg * 4, val);
nvkm_done(ctx->data);
}
#endif

View File

@ -784,10 +784,8 @@ static void
dd_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NVKM_GRCTX_VALS) {
nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
nvkm_done(ctx->data);
}
ctx->ctxvals_pos += num;
}
@ -1159,10 +1157,8 @@ static void
xf_emit(struct nvkm_grctx *ctx, int num, u32 val) {
int i;
if (val && ctx->mode == NVKM_GRCTX_VALS) {
nvkm_kmap(ctx->data);
for (i = 0; i < num; i++)
nvkm_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
nvkm_done(ctx->data);
}
ctx->ctxvals_pos += num << 3;
}

View File

@ -223,12 +223,8 @@ gf100_fermi_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
return -EINVAL;
}
struct nvkm_ofuncs
gf100_fermi_ofuncs = {
.ctor = _nvkm_object_ctor,
.dtor = nvkm_object_destroy,
.init = _nvkm_object_init,
.fini = _nvkm_object_fini,
const struct nvkm_object_func
gf100_fermi = {
.mthd = gf100_fermi_mthd,
};
@ -259,40 +255,106 @@ gf100_gr_mthd_sw(struct nvkm_device *device, u16 class, u32 mthd, u32 data)
return false;
}
struct nvkm_oclass
gf100_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
static int
gf100_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
{
struct gf100_gr *gr = gf100_gr(base);
int c = 0;
while (gr->func->sclass[c].oclass) {
if (c++ == index) {
*sclass = gr->func->sclass[index];
return index;
}
}
return c;
}
/*******************************************************************************
* PGRAPH context
******************************************************************************/
int
gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *args, u32 size,
struct nvkm_object **pobject)
static int
gf100_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_vm *vm = nvkm_client(parent)->vm;
struct gf100_gr *gr = (void *)engine;
struct gf100_gr_chan *chan = gf100_gr_chan(object);
struct gf100_gr *gr = chan->gr;
int ret, i;
ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
align, false, parent, pgpuobj);
if (ret)
return ret;
nvkm_kmap(*pgpuobj);
for (i = 0; i < gr->size; i += 4)
nvkm_wo32(*pgpuobj, i, gr->data[i / 4]);
if (!gr->firmware) {
nvkm_wo32(*pgpuobj, 0x00, chan->mmio_nr / 2);
nvkm_wo32(*pgpuobj, 0x04, chan->mmio_vma.offset >> 8);
} else {
nvkm_wo32(*pgpuobj, 0xf4, 0);
nvkm_wo32(*pgpuobj, 0xf8, 0);
nvkm_wo32(*pgpuobj, 0x10, chan->mmio_nr / 2);
nvkm_wo32(*pgpuobj, 0x14, lower_32_bits(chan->mmio_vma.offset));
nvkm_wo32(*pgpuobj, 0x18, upper_32_bits(chan->mmio_vma.offset));
nvkm_wo32(*pgpuobj, 0x1c, 1);
nvkm_wo32(*pgpuobj, 0x20, 0);
nvkm_wo32(*pgpuobj, 0x28, 0);
nvkm_wo32(*pgpuobj, 0x2c, 0);
}
nvkm_done(*pgpuobj);
return 0;
}
static void *
gf100_gr_chan_dtor(struct nvkm_object *object)
{
struct gf100_gr_chan *chan = gf100_gr_chan(object);
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
if (chan->data[i].vma.node) {
nvkm_vm_unmap(&chan->data[i].vma);
nvkm_vm_put(&chan->data[i].vma);
}
nvkm_memory_del(&chan->data[i].mem);
}
if (chan->mmio_vma.node) {
nvkm_vm_unmap(&chan->mmio_vma);
nvkm_vm_put(&chan->mmio_vma);
}
nvkm_memory_del(&chan->mmio);
return chan;
}
static const struct nvkm_object_func
gf100_gr_chan = {
.dtor = gf100_gr_chan_dtor,
.bind = gf100_gr_chan_bind,
};
static int
gf100_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
struct gf100_gr *gr = gf100_gr(base);
struct gf100_gr_data *data = gr->mmio_data;
struct gf100_gr_mmio *mmio = gr->mmio_list;
struct gf100_gr_chan *chan;
struct nvkm_device *device = gr->base.engine.subdev.device;
struct nvkm_gpuobj *image;
int ret, i;
/* allocate memory for context, and fill with default values */
ret = nvkm_gr_context_create(parent, engine, oclass, NULL,
gr->size, 0x100,
NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&gf100_gr_chan, oclass, &chan->object);
chan->gr = gr;
*pobject = &chan->object;
/* allocate memory for a "mmio list" buffer that's used by the HUB
* fuc to modify some per-context register settings on first load
@ -303,7 +365,7 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ret = nvkm_vm_get(vm, 0x1000, 12, NV_MEM_ACCESS_RW |
ret = nvkm_vm_get(fifoch->vm, 0x1000, 12, NV_MEM_ACCESS_RW |
NV_MEM_ACCESS_SYS, &chan->mmio_vma);
if (ret)
return ret;
@ -318,8 +380,9 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
ret = nvkm_vm_get(vm, nvkm_memory_size(chan->data[i].mem),
12, data->access, &chan->data[i].vma);
ret = nvkm_vm_get(fifoch->vm,
nvkm_memory_size(chan->data[i].mem), 12,
data->access, &chan->data[i].vma);
if (ret)
return ret;
@ -343,53 +406,9 @@ gf100_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
mmio++;
}
nvkm_done(chan->mmio);
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
for (i = 0; i < gr->size; i += 4)
nvkm_wo32(image, i, gr->data[i / 4]);
if (!gr->firmware) {
nvkm_wo32(image, 0x00, chan->mmio_nr / 2);
nvkm_wo32(image, 0x04, chan->mmio_vma.offset >> 8);
} else {
nvkm_wo32(image, 0xf4, 0);
nvkm_wo32(image, 0xf8, 0);
nvkm_wo32(image, 0x10, chan->mmio_nr / 2);
nvkm_wo32(image, 0x14, lower_32_bits(chan->mmio_vma.offset));
nvkm_wo32(image, 0x18, upper_32_bits(chan->mmio_vma.offset));
nvkm_wo32(image, 0x1c, 1);
nvkm_wo32(image, 0x20, 0);
nvkm_wo32(image, 0x28, 0);
nvkm_wo32(image, 0x2c, 0);
}
nvkm_done(image);
return 0;
}
void
gf100_gr_context_dtor(struct nvkm_object *object)
{
struct gf100_gr_chan *chan = (void *)object;
int i;
for (i = 0; i < ARRAY_SIZE(chan->data); i++) {
if (chan->data[i].vma.node) {
nvkm_vm_unmap(&chan->data[i].vma);
nvkm_vm_put(&chan->data[i].vma);
}
nvkm_memory_del(&chan->data[i].mem);
}
if (chan->mmio_vma.node) {
nvkm_vm_unmap(&chan->mmio_vma);
nvkm_vm_put(&chan->mmio_vma);
}
nvkm_memory_del(&chan->mmio);
nvkm_gr_context_destroy(&chan->base);
}
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -1312,10 +1331,10 @@ gf100_gr_init_csdata(struct gf100_gr *gr,
int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
const struct gf100_grctx_func *grctx = gr->func->grctx;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct gf100_gr_oclass *oclass = (void *)nv_object(gr)->oclass;
struct gf100_grctx_oclass *cclass = (void *)nv_engine(gr)->cclass;
int i;
if (gr->firmware) {
@ -1446,10 +1465,10 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
nvkm_mc(gr)->unk260(nvkm_mc(gr), 1);
/* load register lists */
gf100_gr_init_csdata(gr, cclass->hub, 0x409000, 0x000, 0x000000);
gf100_gr_init_csdata(gr, cclass->gpc, 0x41a000, 0x000, 0x418000);
gf100_gr_init_csdata(gr, cclass->tpc, 0x41a000, 0x004, 0x419800);
gf100_gr_init_csdata(gr, cclass->ppc, 0x41a000, 0x008, 0x41be00);
gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
gf100_gr_init_csdata(gr, grctx->gpc, 0x41a000, 0x000, 0x418000);
gf100_gr_init_csdata(gr, grctx->tpc, 0x41a000, 0x004, 0x419800);
gf100_gr_init_csdata(gr, grctx->ppc, 0x41a000, 0x008, 0x41be00);
/* start HUB ucode running, it'll init the GPCs */
nvkm_wr32(device, 0x40910c, 0x00000000);
@ -1646,6 +1665,12 @@ gf100_gr_dtor(struct nvkm_object *object)
nvkm_gr_destroy(&gr->base);
}
static const struct nvkm_gr_func
gf100_gr_ = {
.chan_new = gf100_gr_chan_new,
.object_get = gf100_gr_object_get,
};
int
gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *bclass, void *data, u32 size,
@ -1666,6 +1691,8 @@ gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->func = oclass->func;
gr->base.func = &gf100_gr_;
nv_subdev(gr)->unit = 0x08001000;
nv_subdev(gr)->intr = gf100_gr_intr;
@ -1752,8 +1779,6 @@ gf100_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
break;
}
nv_engine(gr)->cclass = *oclass->cclass;
nv_engine(gr)->sclass = oclass->sclass;
return 0;
}
@ -1777,6 +1802,18 @@ gf100_gr_gpccs_ucode = {
.data.size = sizeof(gf100_grgpc_data),
};
static const struct gf100_gr_func
gf100_gr = {
.grctx = &gf100_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf100_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc0),
@ -1786,8 +1823,7 @@ gf100_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf100_grctx_oclass,
.sclass = gf100_gr_sclass,
.func = &gf100_gr,
.mmio = gf100_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,

View File

@ -23,9 +23,12 @@
*/
#ifndef __NVC0_GR_H__
#define __NVC0_GR_H__
#include <engine/gr.h>
#define gf100_gr(p) container_of((p), struct gf100_gr, base)
#include "priv.h"
#include <core/gpuobj.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
#define GPC_MAX 32
#define TPC_MAX (GPC_MAX * 8)
@ -69,6 +72,7 @@ struct gf100_gr_zbc_depth {
struct gf100_gr {
struct nvkm_gr base;
const struct gf100_gr_func *func;
struct gf100_gr_fuc fuc409c;
struct gf100_gr_fuc fuc409d;
@ -106,23 +110,27 @@ struct gf100_gr {
u8 magic_not_rop_nr;
};
struct gf100_gr_func {
const struct gf100_grctx_func *grctx;
struct nvkm_sclass sclass[];
};
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
struct gf100_gr_chan {
struct nvkm_gr_chan base;
struct nvkm_object object;
struct gf100_gr *gr;
struct nvkm_memory *mmio;
struct nvkm_vma mmio_vma;
int mmio_nr;
struct {
struct nvkm_memory *mem;
struct nvkm_vma vma;
} data[4];
};
int gf100_gr_context_ctor(struct nvkm_object *, struct nvkm_object *,
struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
void gf100_gr_context_dtor(struct nvkm_object *);
void gf100_gr_ctxctl_debug(struct gf100_gr *);
void gf100_gr_dtor_fw(struct gf100_gr_fuc *);
@ -149,7 +157,7 @@ int gk20a_gr_init(struct nvkm_object *);
int gm204_gr_init(struct nvkm_object *);
extern struct nvkm_ofuncs gf100_fermi_ofuncs;
extern const struct nvkm_object_func gf100_fermi;
extern struct nvkm_oclass gf100_gr_sclass[];
extern struct nvkm_oclass gf110_gr_sclass[];
@ -185,8 +193,7 @@ extern struct gf100_gr_ucode gk110_gr_gpccs_ucode;
struct gf100_gr_oclass {
struct nvkm_oclass base;
struct nvkm_oclass **cclass;
struct nvkm_oclass *sclass;
const struct gf100_gr_func *func;
const struct gf100_gr_pack *mmio;
struct {
struct gf100_gr_ucode *ucode;

View File

@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -110,6 +112,18 @@ gf104_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf104_gr = {
.grctx = &gf104_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf104_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc3),
@ -119,8 +133,7 @@ gf104_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf104_grctx_oclass,
.sclass = gf100_gr_sclass,
.func = &gf104_gr,
.mmio = gf104_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,

View File

@ -26,20 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gf108_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_B, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -117,6 +103,19 @@ gf108_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf108_gr = {
.grctx = &gf108_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf108_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc1),
@ -126,8 +125,7 @@ gf108_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf108_grctx_oclass,
.sclass = gf108_gr_sclass,
.func = &gf108_gr,
.mmio = gf108_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,

View File

@ -26,21 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gf110_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ FERMI_MEMORY_TO_MEMORY_FORMAT_A, &nvkm_object_ofuncs },
{ FERMI_A, &gf100_fermi_ofuncs },
{ FERMI_B, &gf100_fermi_ofuncs },
{ FERMI_C, &gf100_fermi_ofuncs },
{ FERMI_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -99,6 +84,20 @@ gf110_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf110_gr = {
.grctx = &gf110_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xc8),
@ -108,8 +107,7 @@ gf110_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf110_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf110_gr,
.mmio = gf110_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,

View File

@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -118,6 +120,20 @@ gf117_gr_gpccs_ucode = {
.data.size = sizeof(gf117_grgpc_data),
};
static const struct gf100_gr_func
gf117_gr = {
.grctx = &gf117_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf117_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd7),
@ -127,8 +143,7 @@ gf117_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf117_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf117_gr,
.mmio = gf117_gr_pack_mmio,
.fecs.ucode = &gf117_gr_fecs_ucode,
.gpccs.ucode = &gf117_gr_gpccs_ucode,

View File

@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -173,6 +175,20 @@ gf119_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gf119_gr = {
.grctx = &gf119_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, FERMI_MEMORY_TO_MEMORY_FORMAT_A },
{ -1, -1, FERMI_A, &gf100_fermi },
{ -1, -1, FERMI_B, &gf100_fermi },
{ -1, -1, FERMI_C, &gf100_fermi },
{ -1, -1, FERMI_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gf119_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xd9),
@ -182,8 +198,7 @@ gf119_gr_oclass = &(struct gf100_gr_oclass) {
.init = gf100_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gf119_grctx_oclass,
.sclass = gf110_gr_sclass,
.func = &gf119_gr,
.mmio = gf119_gr_pack_mmio,
.fecs.ucode = &gf100_gr_fecs_ucode,
.gpccs.ucode = &gf100_gr_gpccs_ucode,

View File

@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gk104_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_A, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -311,6 +298,18 @@ gk104_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gk104_gr = {
.grctx = &gk104_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
{ -1, -1, KEPLER_A, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_A },
{}
}
};
int
gk104_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -351,8 +350,7 @@ gk104_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk104_grctx_oclass,
.sclass = gk104_gr_sclass,
.func = &gk104_gr,
.mmio = gk104_gr_pack_mmio,
.fecs.ucode = &gk104_gr_fecs_ucode,
.gpccs.ucode = &gk104_gr_gpccs_ucode,

View File

@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gk110_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -193,6 +180,18 @@ gk110_gr_gpccs_ucode = {
.data.size = sizeof(gk110_grgpc_data),
};
static const struct gf100_gr_func
gk110_gr = {
.grctx = &gk110_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk110_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf0),
@ -202,8 +201,7 @@ gk110_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk110_grctx_oclass,
.sclass = gk110_gr_sclass,
.func = &gk110_gr,
.mmio = gk110_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,

View File

@ -24,6 +24,8 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -98,6 +100,18 @@ gk110b_gr_pack_mmio[] = {
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct gf100_gr_func
gk110b_gr = {
.grctx = &gk110b_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0xf1),
@ -107,8 +121,7 @@ gk110b_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk110b_grctx_oclass,
.sclass = gk110_gr_sclass,
.func = &gk110b_gr,
.mmio = gk110b_gr_pack_mmio,
.fecs.ucode = &gk110_gr_fecs_ucode,
.gpccs.ucode = &gk110_gr_gpccs_ucode,

View File

@ -28,19 +28,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gk208_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ KEPLER_B, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -172,6 +159,18 @@ gk208_gr_gpccs_ucode = {
.data.size = sizeof(gk208_grgpc_data),
};
static const struct gf100_gr_func
gk208_gr = {
.grctx = &gk208_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, KEPLER_B, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gk208_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x08),
@ -181,8 +180,7 @@ gk208_gr_oclass = &(struct gf100_gr_oclass) {
.init = gk104_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk208_grctx_oclass,
.sclass = gk208_gr_sclass,
.func = &gk208_gr,
.mmio = gk208_gr_pack_mmio,
.fecs.ucode = &gk208_gr_fecs_ucode,
.gpccs.ucode = &gk208_gr_gpccs_ucode,

View File

@ -22,17 +22,9 @@
#include "gk20a.h"
#include "ctxgf100.h"
#include <nvif/class.h>
#include <subdev/timer.h>
static struct nvkm_oclass
gk20a_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_A, &nvkm_object_ofuncs },
{ KEPLER_C, &gf100_fermi_ofuncs },
{ KEPLER_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
#include <nvif/class.h>
static void
gk20a_gr_init_dtor(struct gf100_gr_pack *pack)
@ -350,6 +342,18 @@ gk20a_gr_init(struct nvkm_object *object)
return gf100_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gk20a_gr = {
.grctx = &gk20a_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_A },
{ -1, -1, KEPLER_C, &gf100_fermi },
{ -1, -1, KEPLER_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gk20a_gr_oclass = &(struct gk20a_gr_oclass) {
.gf100 = {
@ -360,8 +364,7 @@ gk20a_gr_oclass = &(struct gk20a_gr_oclass) {
.init = gk20a_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gk20a_grctx_oclass,
.sclass = gk20a_gr_sclass,
.func = &gk20a_gr,
.ppc_nr = 1,
},
.set_hww_esr_report_mask = gk20a_gr_set_hww_esr_report_mask,

View File

@ -29,19 +29,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
gm107_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_A, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_A, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -454,6 +441,18 @@ gm107_gr_gpccs_ucode = {
.data.size = sizeof(gm107_grgpc_data),
};
static const struct gf100_gr_func
gm107_gr = {
.grctx = &gm107_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_A, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_A },
{}
}
};
struct nvkm_oclass *
gm107_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x07),
@ -463,8 +462,7 @@ gm107_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm107_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm107_grctx_oclass,
.sclass = gm107_gr_sclass,
.func = &gm107_gr,
.mmio = gm107_gr_pack_mmio,
.fecs.ucode = &gm107_gr_fecs_ucode,
.gpccs.ucode = &gm107_gr_gpccs_ucode,

View File

@ -26,19 +26,6 @@
#include <nvif/class.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
gm204_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_B, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
/*******************************************************************************
* PGRAPH register lists
******************************************************************************/
@ -371,6 +358,18 @@ gm204_gr_init(struct nvkm_object *object)
return gm204_gr_init_ctxctl(gr);
}
static const struct gf100_gr_func
gm204_gr = {
.grctx = &gm204_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm204_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x24),
@ -380,8 +379,7 @@ gm204_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm204_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm204_grctx_oclass,
.sclass = gm204_gr_sclass,
.func = &gm204_gr,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
}.base;

View File

@ -24,6 +24,20 @@
#include "gf100.h"
#include "ctxgf100.h"
#include <nvif/class.h>
static const struct gf100_gr_func
gm206_gr = {
.grctx = &gm206_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm206_gr_oclass = &(struct gf100_gr_oclass) {
.base.handle = NV_ENGINE(GR, 0x26),
@ -33,8 +47,7 @@ gm206_gr_oclass = &(struct gf100_gr_oclass) {
.init = gm204_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm206_grctx_oclass,
.sclass = gm204_gr_sclass,
.func = &gm206_gr,
.mmio = gm204_gr_pack_mmio,
.ppc_nr = 2,
}.base;

View File

@ -22,17 +22,9 @@
#include "gk20a.h"
#include "ctxgf100.h"
#include <nvif/class.h>
#include <subdev/timer.h>
static struct nvkm_oclass
gm20b_gr_sclass[] = {
{ FERMI_TWOD_A, &nvkm_object_ofuncs },
{ KEPLER_INLINE_TO_MEMORY_B, &nvkm_object_ofuncs },
{ MAXWELL_B, &gf100_fermi_ofuncs },
{ MAXWELL_COMPUTE_B, &nvkm_object_ofuncs },
{}
};
#include <nvif/class.h>
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
@ -67,6 +59,18 @@ gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
nvkm_wr32(device, 0x419e4c, 0x5);
}
static const struct gf100_gr_func
gm20b_gr = {
.grctx = &gm20b_grctx,
.sclass = {
{ -1, -1, FERMI_TWOD_A },
{ -1, -1, KEPLER_INLINE_TO_MEMORY_B },
{ -1, -1, MAXWELL_B, &gf100_fermi },
{ -1, -1, MAXWELL_COMPUTE_B },
{}
}
};
struct nvkm_oclass *
gm20b_gr_oclass = &(struct gk20a_gr_oclass) {
.gf100 = {
@ -77,8 +81,7 @@ gm20b_gr_oclass = &(struct gk20a_gr_oclass) {
.init = gk20a_gr_init,
.fini = _nvkm_gr_fini,
},
.cclass = &gm20b_grctx_oclass,
.sclass = gm20b_gr_sclass,
.func = &gm20b_gr,
.ppc_nr = 1,
},
.init_gpc_mmu = gm20b_gr_init_gpc_mmu,

View File

@ -21,7 +21,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <engine/gr.h>
#include "priv.h"
#include "regs.h"
#include <core/client.h>
@ -345,25 +345,23 @@ nv04_gr_ctx_regs[] = {
NV04_PGRAPH_DEBUG_3
};
#define nv04_gr(p) container_of((p), struct nv04_gr, base)
struct nv04_gr {
struct nvkm_gr base;
struct nv04_gr_chan *chan[16];
spinlock_t lock;
};
#define nv04_gr_chan(p) container_of((p), struct nv04_gr_chan, object)
struct nv04_gr_chan {
struct nvkm_object base;
struct nvkm_object object;
struct nv04_gr *gr;
int chid;
u32 nv04[ARRAY_SIZE(nv04_gr_ctx_regs)];
};
static inline struct nv04_gr *
nv04_gr(struct nv04_gr_chan *chan)
{
return (void *)nv_object(chan)->engine;
}
/*******************************************************************************
* Graphics object classes
******************************************************************************/
@ -1041,85 +1039,28 @@ nv04_gr_mthd(struct nvkm_device *device, u32 inst, u32 mthd, u32 data)
}
static int
nv04_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv04_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *obj;
int ret;
ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
nvkm_kmap(obj);
nvkm_wo32(obj, 0x00, nv_mclass(obj));
int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16, align,
false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, object->oclass_name);
nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
nvkm_mo32(obj, 0x00, 0x00080000, 0x00080000);
nvkm_mo32(*pgpuobj, 0x08, 0x00080000, 0x00080000);
#endif
nvkm_wo32(obj, 0x04, 0x00000000);
nvkm_wo32(obj, 0x08, 0x00000000);
nvkm_wo32(obj, 0x0c, 0x00000000);
nvkm_done(obj);
return 0;
nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
nvkm_done(*pgpuobj);
}
return ret;
}
struct nvkm_ofuncs
nv04_gr_ofuncs = {
.ctor = nv04_gr_object_ctor,
.dtor = _nvkm_gpuobj_dtor,
.init = _nvkm_gpuobj_init,
.fini = _nvkm_gpuobj_fini,
.rd32 = _nvkm_gpuobj_rd32,
.wr32 = _nvkm_gpuobj_wr32,
};
static struct nvkm_oclass
nv04_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
{ 0x0017, &nv04_gr_ofuncs }, /* chroma */
{ 0x0018, &nv04_gr_ofuncs }, /* pattern (nv01) */
{ 0x0019, &nv04_gr_ofuncs }, /* clip */
{ 0x001c, &nv04_gr_ofuncs }, /* line */
{ 0x001d, &nv04_gr_ofuncs }, /* tri */
{ 0x001e, &nv04_gr_ofuncs }, /* rect */
{ 0x001f, &nv04_gr_ofuncs },
{ 0x0021, &nv04_gr_ofuncs },
{ 0x0030, &nv04_gr_ofuncs }, /* null */
{ 0x0036, &nv04_gr_ofuncs },
{ 0x0037, &nv04_gr_ofuncs },
{ 0x0038, &nv04_gr_ofuncs }, /* dvd subpicture */
{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
{ 0x0042, &nv04_gr_ofuncs }, /* surf2d */
{ 0x0043, &nv04_gr_ofuncs }, /* rop */
{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
{ 0x0048, &nv04_gr_ofuncs },
{ 0x004a, &nv04_gr_ofuncs },
{ 0x004b, &nv04_gr_ofuncs },
{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
{ 0x0053, &nv04_gr_ofuncs },
{ 0x0054, &nv04_gr_ofuncs }, /* ttri */
{ 0x0055, &nv04_gr_ofuncs }, /* mtri */
{ 0x0057, &nv04_gr_ofuncs }, /* chroma */
{ 0x0058, &nv04_gr_ofuncs }, /* surf_dst */
{ 0x0059, &nv04_gr_ofuncs }, /* surf_src */
{ 0x005a, &nv04_gr_ofuncs }, /* surf_color */
{ 0x005b, &nv04_gr_ofuncs }, /* surf_zeta */
{ 0x005c, &nv04_gr_ofuncs }, /* line */
{ 0x005d, &nv04_gr_ofuncs }, /* tri */
{ 0x005e, &nv04_gr_ofuncs }, /* rect */
{ 0x005f, &nv04_gr_ofuncs },
{ 0x0060, &nv04_gr_ofuncs },
{ 0x0061, &nv04_gr_ofuncs },
{ 0x0064, &nv04_gr_ofuncs }, /* iifc (nv05) */
{ 0x0065, &nv04_gr_ofuncs }, /* ifc (nv05) */
{ 0x0066, &nv04_gr_ofuncs }, /* sifc (nv05) */
{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
{ 0x0076, &nv04_gr_ofuncs },
{ 0x0077, &nv04_gr_ofuncs },
{},
const struct nvkm_object_func
nv04_gr_object = {
.bind = nv04_gr_object_bind,
};
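The bind hook above replaces the old per-object constructor: it fills a 16-byte instance object whose first word holds the class id, zeroes the rest, and on big-endian kernels sets bit 19 (0x00080000) of the 0x08 word. A small standalone illustration of that layout follows; the demo_* names and the plain array standing in for the gpuobj are assumptions for the sketch.

#include <stdint.h>
#include <stdio.h>

/* Fill a 4-word object image the way nv04_gr_object_bind() does above:
 * word 0x00 = class id, words 0x04/0x0c = 0, and on big-endian builds
 * bit 19 (0x00080000) of word 0x08 is set. */
static void
demo_fill_object(uint32_t obj[4], uint32_t oclass, int big_endian)
{
	obj[0] = oclass;
	obj[1] = 0x00000000;
	obj[2] = big_endian ? 0x00080000 : 0x00000000;
	obj[3] = 0x00000000;
}

int main(void)
{
	uint32_t obj[4];
	demo_fill_object(obj, 0x0039 /* m2mf */, 0);
	printf("%08x %08x %08x %08x\n", obj[0], obj[1], obj[2], obj[3]);
	return 0;
}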
/*******************************************************************************
@ -1142,8 +1083,7 @@ nv04_gr_channel(struct nv04_gr *gr)
static int
nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
{
struct nv04_gr *gr = nv04_gr(chan);
struct nvkm_device *device = gr->base.engine.subdev.device;
struct nvkm_device *device = chan->gr->base.engine.subdev.device;
int i;
for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
@ -1158,8 +1098,7 @@ nv04_gr_load_context(struct nv04_gr_chan *chan, int chid)
static int
nv04_gr_unload_context(struct nv04_gr_chan *chan)
{
struct nv04_gr *gr = nv04_gr(chan);
struct nvkm_device *device = gr->base.engine.subdev.device;
struct nvkm_device *device = chan->gr->base.engine.subdev.device;
int i;
for (i = 0; i < ARRAY_SIZE(nv04_gr_ctx_regs); i++)
@ -1178,7 +1117,7 @@ nv04_gr_context_switch(struct nv04_gr *gr)
struct nv04_gr_chan *next = NULL;
int chid;
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
/* If previous context is valid, we need to save it */
prev = nv04_gr_channel(gr);
@ -1204,59 +1143,24 @@ static u32 *ctx_reg(struct nv04_gr_chan *chan, u32 reg)
return NULL;
}
static int
nv04_gr_context_ctor(struct nvkm_object *parent,
struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
static void *
nv04_gr_chan_dtor(struct nvkm_object *object)
{
struct nvkm_fifo_chan *fifo = (void *)parent;
struct nv04_gr *gr = (void *)engine;
struct nv04_gr_chan *chan;
unsigned long flags;
int ret;
ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
spin_lock_irqsave(&gr->lock, flags);
if (gr->chan[fifo->chid]) {
*pobject = nv_object(gr->chan[fifo->chid]);
atomic_inc(&(*pobject)->refcount);
spin_unlock_irqrestore(&gr->lock, flags);
nvkm_object_destroy(&chan->base);
return 1;
}
*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
gr->chan[fifo->chid] = chan;
chan->chid = fifo->chid;
spin_unlock_irqrestore(&gr->lock, flags);
return 0;
}
static void
nv04_gr_context_dtor(struct nvkm_object *object)
{
struct nv04_gr *gr = (void *)object->engine;
struct nv04_gr_chan *chan = (void *)object;
struct nv04_gr_chan *chan = nv04_gr_chan(object);
struct nv04_gr *gr = chan->gr;
unsigned long flags;
spin_lock_irqsave(&gr->lock, flags);
gr->chan[chan->chid] = NULL;
spin_unlock_irqrestore(&gr->lock, flags);
nvkm_object_destroy(&chan->base);
return chan;
}
static int
nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
nv04_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv04_gr *gr = (void *)object->engine;
struct nv04_gr_chan *chan = (void *)object;
struct nv04_gr_chan *chan = nv04_gr_chan(object);
struct nv04_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
@ -1266,34 +1170,50 @@ nv04_gr_context_fini(struct nvkm_object *object, bool suspend)
nv04_gr_unload_context(chan);
nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&gr->lock, flags);
return _nvkm_object_fini(&chan->base, suspend);
return 0;
}
static struct nvkm_oclass
nv04_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x04),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv04_gr_context_ctor,
.dtor = nv04_gr_context_dtor,
.init = _nvkm_object_init,
.fini = nv04_gr_context_fini,
},
static const struct nvkm_object_func
nv04_gr_chan = {
.dtor = nv04_gr_chan_dtor,
.fini = nv04_gr_chan_fini,
};
static int
nv04_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv04_gr *gr = nv04_gr(base);
struct nv04_gr_chan *chan;
unsigned long flags;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv04_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
*ctx_reg(chan, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
spin_lock_irqsave(&gr->lock, flags);
gr->chan[chan->chid] = chan;
spin_unlock_irqrestore(&gr->lock, flags);
return 0;
}
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
bool
nv04_gr_idle(void *obj)
nv04_gr_idle(struct nvkm_gr *gr)
{
struct nvkm_gr *gr = nvkm_gr(obj);
struct nvkm_subdev *subdev = &gr->engine.subdev;
struct nvkm_device *device = subdev->device;
u32 mask = 0xffffffff;
if (nv_device(obj)->card_type == NV_40)
if (device->card_type == NV_40)
mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
if (nvkm_msec(device, 2000,
@ -1395,12 +1315,62 @@ nv04_gr_intr(struct nvkm_subdev *subdev)
"nstatus %08x [%s] ch %d [%s] subc %d "
"class %04x mthd %04x data %08x\n",
show, msg, nsource, src, nstatus, sta, chid,
nvkm_client_name(chan), subc, class, mthd, data);
chan ? chan->object.client->name : "unknown",
subc, class, mthd, data);
}
spin_unlock_irqrestore(&gr->lock, flags);
}
static const struct nvkm_gr_func
nv04_gr = {
.chan_new = nv04_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0017, &nv04_gr_object }, /* chroma */
{ -1, -1, 0x0018, &nv04_gr_object }, /* pattern (nv01) */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x001c, &nv04_gr_object }, /* line */
{ -1, -1, 0x001d, &nv04_gr_object }, /* tri */
{ -1, -1, 0x001e, &nv04_gr_object }, /* rect */
{ -1, -1, 0x001f, &nv04_gr_object },
{ -1, -1, 0x0021, &nv04_gr_object },
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0036, &nv04_gr_object },
{ -1, -1, 0x0037, &nv04_gr_object },
{ -1, -1, 0x0038, &nv04_gr_object }, /* dvd subpicture */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0042, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
{ -1, -1, 0x0048, &nv04_gr_object },
{ -1, -1, 0x004a, &nv04_gr_object },
{ -1, -1, 0x004b, &nv04_gr_object },
{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x0053, &nv04_gr_object },
{ -1, -1, 0x0054, &nv04_gr_object }, /* ttri */
{ -1, -1, 0x0055, &nv04_gr_object }, /* mtri */
{ -1, -1, 0x0057, &nv04_gr_object }, /* chroma */
{ -1, -1, 0x0058, &nv04_gr_object }, /* surf_dst */
{ -1, -1, 0x0059, &nv04_gr_object }, /* surf_src */
{ -1, -1, 0x005a, &nv04_gr_object }, /* surf_color */
{ -1, -1, 0x005b, &nv04_gr_object }, /* surf_zeta */
{ -1, -1, 0x005c, &nv04_gr_object }, /* line */
{ -1, -1, 0x005d, &nv04_gr_object }, /* tri */
{ -1, -1, 0x005e, &nv04_gr_object }, /* rect */
{ -1, -1, 0x005f, &nv04_gr_object },
{ -1, -1, 0x0060, &nv04_gr_object },
{ -1, -1, 0x0061, &nv04_gr_object },
{ -1, -1, 0x0064, &nv04_gr_object }, /* iifc (nv05) */
{ -1, -1, 0x0065, &nv04_gr_object }, /* ifc (nv05) */
{ -1, -1, 0x0066, &nv04_gr_object }, /* sifc (nv05) */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0076, &nv04_gr_object },
{ -1, -1, 0x0077, &nv04_gr_object },
{}
}
};
static int
nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -1414,10 +1384,9 @@ nv04_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv04_gr;
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv04_gr_intr;
nv_engine(gr)->cclass = &nv04_gr_cclass;
nv_engine(gr)->sclass = nv04_gr_sclass;
spin_lock_init(&gr->lock);
return 0;
}
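The nv04 channel above is now a plain new-style object: a wrapper struct embeds struct nvkm_object, a container_of() macro recovers the wrapper, and a const nvkm_object_func supplies dtor/fini while chan_new kzallocs and registers it. A self-contained sketch of that pattern under invented demo_* names (the real nvkm types and helpers are not reproduced here):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_object;

struct demo_object_func {
	void *(*dtor)(struct demo_object *);
	int (*fini)(struct demo_object *, int suspend);
};

struct demo_object {
	const struct demo_object_func *func;
};

#define demo_gr_chan(p) container_of((p), struct demo_gr_chan, object)

struct demo_gr_chan {
	struct demo_object object;  /* embedded base object, as above */
	int chid;
};

static void *
demo_gr_chan_dtor(struct demo_object *object)
{
	struct demo_gr_chan *chan = demo_gr_chan(object);
	printf("dtor: channel %d\n", chan->chid);
	return chan;  /* caller frees the returned wrapper */
}

static int
demo_gr_chan_fini(struct demo_object *object, int suspend)
{
	struct demo_gr_chan *chan = demo_gr_chan(object);
	printf("fini: channel %d (suspend=%d)\n", chan->chid, suspend);
	return 0;
}

static const struct demo_object_func demo_gr_chan_func = {
	.dtor = demo_gr_chan_dtor,
	.fini = demo_gr_chan_fini,
};

int main(void)
{
	/* Mirrors the kzalloc + ctor + register flow of nv04_gr_chan_new(). */
	struct demo_gr_chan *chan = calloc(1, sizeof(*chan));
	if (!chan)
		return 1;
	chan->object.func = &demo_gr_chan_func;
	chan->chid = 3;

	chan->object.func->fini(&chan->object, 0);
	free(chan->object.func->dtor(&chan->object));
	return 0;
}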

View File

@ -21,7 +21,7 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <engine/gr.h>
#include "priv.h"
#include "regs.h"
#include <core/client.h>
@ -385,14 +385,19 @@ static int nv17_gr_ctx_regs[] = {
0x00400a04,
};
#define nv10_gr(p) container_of((p), struct nv10_gr, base)
struct nv10_gr {
struct nvkm_gr base;
struct nv10_gr_chan *chan[32];
spinlock_t lock;
};
#define nv10_gr_chan(p) container_of((p), struct nv10_gr_chan, object)
struct nv10_gr_chan {
struct nvkm_object base;
struct nvkm_object object;
struct nv10_gr *gr;
int chid;
int nv10[ARRAY_SIZE(nv10_gr_ctx_regs)];
int nv17[ARRAY_SIZE(nv17_gr_ctx_regs)];
@ -401,12 +406,6 @@ struct nv10_gr_chan {
};
static inline struct nv10_gr *
nv10_gr(struct nv10_gr_chan *chan)
{
return (void *)nv_object(chan)->engine;
}
/*******************************************************************************
* Graphics object classes
******************************************************************************/
@ -427,57 +426,11 @@ nv10_gr(struct nv10_gr_chan *chan)
nvkm_wr32(device, NV10_PGRAPH_PIPE_DATA, state[__i]); \
} while (0)
static struct nvkm_oclass
nv10_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs }, /* clip */
{ 0x0030, &nv04_gr_ofuncs }, /* null */
{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs }, /* rop */
{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
{ 0x005f, &nv04_gr_ofuncs }, /* blit */
{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs }, /* blit */
{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
{ 0x0056, &nv04_gr_ofuncs }, /* celcius */
{},
};
static struct nvkm_oclass
nv15_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs }, /* clip */
{ 0x0030, &nv04_gr_ofuncs }, /* null */
{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs }, /* rop */
{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
{ 0x005f, &nv04_gr_ofuncs }, /* blit */
{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs }, /* blit */
{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
{ 0x0096, &nv04_gr_ofuncs }, /* celcius */
{},
};
static void
nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
struct nvkm_device *device = chan->base.engine->subdev.device;
struct nvkm_gr *gr = nvkm_gr(chan);
struct nvkm_device *device = chan->object.engine->subdev.device;
struct nvkm_gr *gr = &chan->gr->base;
struct pipe_state *pipe = &chan->pipe_state;
u32 pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
u32 xfmode0, xfmode1;
@ -549,8 +502,8 @@ nv17_gr_mthd_lma_window(struct nv10_gr_chan *chan, u32 mthd, u32 data)
static void
nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
struct nvkm_device *device = chan->base.engine->subdev.device;
struct nvkm_gr *gr = nvkm_gr(chan);
struct nvkm_device *device = chan->object.engine->subdev.device;
struct nvkm_gr *gr = &chan->gr->base;
nv04_gr_idle(gr);
@ -585,29 +538,6 @@ nv10_gr_mthd(struct nv10_gr_chan *chan, u8 class, u32 mthd, u32 data)
return func(chan, mthd, data);
}
static struct nvkm_oclass
nv17_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs }, /* clip */
{ 0x0030, &nv04_gr_ofuncs }, /* null */
{ 0x0039, &nv04_gr_ofuncs }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs }, /* rop */
{ 0x0044, &nv04_gr_ofuncs }, /* pattern */
{ 0x004a, &nv04_gr_ofuncs }, /* gdi */
{ 0x0052, &nv04_gr_ofuncs }, /* swzsurf */
{ 0x005f, &nv04_gr_ofuncs }, /* blit */
{ 0x0062, &nv04_gr_ofuncs }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs }, /* blit */
{ 0x0093, &nv04_gr_ofuncs }, /* surf3d */
{ 0x0094, &nv04_gr_ofuncs }, /* ttri */
{ 0x0095, &nv04_gr_ofuncs }, /* mtri */
{ 0x0099, &nv04_gr_ofuncs },
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
@ -628,7 +558,7 @@ nv10_gr_channel(struct nv10_gr *gr)
static void
nv10_gr_save_pipe(struct nv10_gr_chan *chan)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct pipe_state *pipe = &chan->pipe_state;
struct nvkm_device *device = gr->base.engine.subdev.device;
@ -647,13 +577,13 @@ nv10_gr_save_pipe(struct nv10_gr_chan *chan)
static void
nv10_gr_load_pipe(struct nv10_gr_chan *chan)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct pipe_state *pipe = &chan->pipe_state;
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 xfmode0, xfmode1;
int i;
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
/* XXX check haiku comments */
xfmode0 = nvkm_rd32(device, NV10_PGRAPH_XFMODE0);
xfmode1 = nvkm_rd32(device, NV10_PGRAPH_XFMODE1);
@ -678,7 +608,7 @@ nv10_gr_load_pipe(struct nv10_gr_chan *chan)
PIPE_RESTORE(gr, pipe->pipe_0x0200, 0x0200);
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
/* restore XFMODE */
nvkm_wr32(device, NV10_PGRAPH_XFMODE0, xfmode0);
@ -692,13 +622,13 @@ nv10_gr_load_pipe(struct nv10_gr_chan *chan)
PIPE_RESTORE(gr, pipe->pipe_0x4400, 0x4400);
PIPE_RESTORE(gr, pipe->pipe_0x0000, 0x0000);
PIPE_RESTORE(gr, pipe->pipe_0x0040, 0x0040);
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
}
static void
nv10_gr_create_pipe(struct nv10_gr_chan *chan)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct pipe_state *pipe_state = &chan->pipe_state;
u32 *pipe_state_addr;
@ -880,7 +810,7 @@ nv17_gr_ctx_regs_find_offset(struct nv10_gr *gr, int reg)
static void
nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
u32 ctx_user, ctx_switch[5];
@ -951,7 +881,7 @@ nv10_gr_load_dma_vtxbuf(struct nv10_gr_chan *chan, int chid, u32 inst)
static int
nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 inst;
int i;
@ -979,7 +909,7 @@ nv10_gr_load_context(struct nv10_gr_chan *chan, int chid)
static int
nv10_gr_unload_context(struct nv10_gr_chan *chan)
{
struct nv10_gr *gr = nv10_gr(chan);
struct nv10_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
int i;
@ -1007,7 +937,7 @@ nv10_gr_context_switch(struct nv10_gr *gr)
struct nv10_gr_chan *next = NULL;
int chid;
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
/* If previous context is valid, we need to save it */
prev = nv10_gr_channel(gr);
@ -1021,6 +951,42 @@ nv10_gr_context_switch(struct nv10_gr *gr)
nv10_gr_load_context(next, chid);
}
static int
nv10_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv10_gr_chan *chan = nv10_gr_chan(object);
struct nv10_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
spin_lock_irqsave(&gr->lock, flags);
nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
if (nv10_gr_channel(gr) == chan)
nv10_gr_unload_context(chan);
nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&gr->lock, flags);
return 0;
}
static void *
nv10_gr_chan_dtor(struct nvkm_object *object)
{
struct nv10_gr_chan *chan = nv10_gr_chan(object);
struct nv10_gr *gr = chan->gr;
unsigned long flags;
spin_lock_irqsave(&gr->lock, flags);
gr->chan[chan->chid] = NULL;
spin_unlock_irqrestore(&gr->lock, flags);
return chan;
}
static const struct nvkm_object_func
nv10_gr_chan = {
.dtor = nv10_gr_chan_dtor,
.fini = nv10_gr_chan_fini,
};
#define NV_WRITE_CTX(reg, val) do { \
int offset = nv10_gr_ctx_regs_find_offset(gr, reg); \
if (offset > 0) \
@ -1034,30 +1000,20 @@ nv10_gr_context_switch(struct nv10_gr *gr)
} while (0)
static int
nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv10_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nvkm_fifo_chan *fifo = (void *)parent;
struct nv10_gr *gr = (void *)engine;
struct nv10_gr *gr = nv10_gr(base);
struct nv10_gr_chan *chan;
struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
int ret;
ret = nvkm_object_create(parent, engine, oclass, 0, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
spin_lock_irqsave(&gr->lock, flags);
if (gr->chan[fifo->chid]) {
*pobject = nv_object(gr->chan[fifo->chid]);
atomic_inc(&(*pobject)->refcount);
spin_unlock_irqrestore(&gr->lock, flags);
nvkm_object_destroy(&chan->base);
return 1;
}
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv10_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
NV_WRITE_CTX(0x00400e88, 0x08000000);
NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
@ -1066,11 +1022,10 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
NV_WRITE_CTX(0x00400e14, 0x00001000);
NV_WRITE_CTX(0x00400e30, 0x00080008);
NV_WRITE_CTX(0x00400e34, 0x00080008);
if (nv_device(gr)->card_type >= NV_11 &&
nv_device(gr)->chipset >= 0x17) {
if (device->card_type >= NV_11 && device->chipset >= 0x17) {
/* is it really needed ??? */
NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
nvkm_rd32(device, NV10_PGRAPH_DEBUG_4));
NV17_WRITE_CTX(0x004006b0, nvkm_rd32(device, 0x004006b0));
NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
@ -1081,55 +1036,12 @@ nv10_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv10_gr_create_pipe(chan);
gr->chan[fifo->chid] = chan;
chan->chid = fifo->chid;
spin_lock_irqsave(&gr->lock, flags);
gr->chan[chan->chid] = chan;
spin_unlock_irqrestore(&gr->lock, flags);
return 0;
}
static void
nv10_gr_context_dtor(struct nvkm_object *object)
{
struct nv10_gr *gr = (void *)object->engine;
struct nv10_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&gr->lock, flags);
gr->chan[chan->chid] = NULL;
spin_unlock_irqrestore(&gr->lock, flags);
nvkm_object_destroy(&chan->base);
}
static int
nv10_gr_context_fini(struct nvkm_object *object, bool suspend)
{
struct nv10_gr *gr = (void *)object->engine;
struct nv10_gr_chan *chan = (void *)object;
struct nvkm_device *device = gr->base.engine.subdev.device;
unsigned long flags;
spin_lock_irqsave(&gr->lock, flags);
nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
if (nv10_gr_channel(gr) == chan)
nv10_gr_unload_context(chan);
nvkm_mask(device, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&gr->lock, flags);
return _nvkm_object_fini(&chan->base, suspend);
}
static struct nvkm_oclass
nv10_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x10),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv10_gr_context_ctor,
.dtor = nv10_gr_context_dtor,
.init = _nvkm_object_init,
.fini = nv10_gr_context_fini,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@ -1144,7 +1056,7 @@ nv10_gr_tile_prog(struct nvkm_engine *engine, int i)
unsigned long flags;
fifo->pause(fifo, &flags);
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
nvkm_wr32(device, NV10_PGRAPH_TLIMIT(i), tile->limit);
nvkm_wr32(device, NV10_PGRAPH_TSIZE(i), tile->pitch);
@ -1214,12 +1126,92 @@ nv10_gr_intr(struct nvkm_subdev *subdev)
"nstatus %08x [%s] ch %d [%s] subc %d "
"class %04x mthd %04x data %08x\n",
show, msg, nsource, src, nstatus, sta, chid,
nvkm_client_name(chan), subc, class, mthd, data);
chan ? chan->object.client->name : "unknown",
subc, class, mthd, data);
}
spin_unlock_irqrestore(&gr->lock, flags);
}
static const struct nvkm_gr_func
nv10_gr = {
.chan_new = nv10_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
{ -1, -1, 0x0056, &nv04_gr_object }, /* celcius */
{}
}
};
static const struct nvkm_gr_func
nv15_gr = {
.chan_new = nv10_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{}
}
};
static const struct nvkm_gr_func
nv17_gr = {
.chan_new = nv10_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* pattern */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0052, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x005f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* blit */
{ -1, -1, 0x0093, &nv04_gr_object }, /* surf3d */
{ -1, -1, 0x0094, &nv04_gr_object }, /* ttri */
{ -1, -1, 0x0095, &nv04_gr_object }, /* mtri */
{ -1, -1, 0x0099, &nv04_gr_object },
{}
}
};
static int
nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -1235,16 +1227,15 @@ nv10_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv10_gr_intr;
nv_engine(gr)->cclass = &nv10_gr_cclass;
if (nv_device(gr)->chipset <= 0x10)
nv_engine(gr)->sclass = nv10_gr_sclass;
gr->base.func = &nv10_gr;
else
if (nv_device(gr)->chipset < 0x17 ||
nv_device(gr)->card_type < NV_11)
nv_engine(gr)->sclass = nv15_gr_sclass;
gr->base.func = &nv15_gr;
else
nv_engine(gr)->sclass = nv17_gr_sclass;
gr->base.func = &nv17_gr;
nv_engine(gr)->tile_prog = nv10_gr_tile_prog;
spin_lock_init(&gr->lock);
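With the class lists folded into per-chipset nvkm_gr_func tables, nv10_gr_ctor above simply picks one of three const tables by chipset instead of swapping engine-level sclass pointers. A tiny standalone sketch of that selection, with demo_* names and a stand-in value for NV_11 as assumptions:

#include <stdio.h>

struct demo_gr_func { const char *name; };

static const struct demo_gr_func demo_nv10 = { "nv10" };
static const struct demo_gr_func demo_nv15 = { "nv15" };
static const struct demo_gr_func demo_nv17 = { "nv17" };

/* Same branch structure as the constructor above. */
static const struct demo_gr_func *
demo_gr_func_select(int chipset, int card_type)
{
	if (chipset <= 0x10)
		return &demo_nv10;
	if (chipset < 0x17 || card_type < 0x11 /* NV_11 stand-in */)
		return &demo_nv15;
	return &demo_nv17;
}

int main(void)
{
	printf("chipset 0x17 -> %s\n", demo_gr_func_select(0x17, 0x11)->name);
	return 0;
}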

View File

@ -7,131 +7,37 @@
#include <subdev/fb.h>
#include <subdev/timer.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
nv20_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
{ 0x0097, &nv04_gr_ofuncs, NULL }, /* kelvin */
{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv20_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x37f0,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x033c, 0xffff0000);
nvkm_wo32(image, 0x03a0, 0x0fff0000);
nvkm_wo32(image, 0x03a4, 0x0fff0000);
nvkm_wo32(image, 0x047c, 0x00000101);
nvkm_wo32(image, 0x0490, 0x00000111);
nvkm_wo32(image, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
nvkm_wo32(image, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
nvkm_wo32(image, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
nvkm_wo32(image, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
nvkm_wo32(image, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
nvkm_wo32(image, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x05a4, 0x4b7fffff);
nvkm_wo32(image, 0x05fc, 0x00000001);
nvkm_wo32(image, 0x0604, 0x00004000);
nvkm_wo32(image, 0x0610, 0x00000001);
nvkm_wo32(image, 0x0618, 0x00040000);
nvkm_wo32(image, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
nvkm_wo32(image, (i + 0), 0x10700ff9);
nvkm_wo32(image, (i + 4), 0x0436086c);
nvkm_wo32(image, (i + 8), 0x000c001b);
}
nvkm_wo32(image, 0x281c, 0x3f800000);
nvkm_wo32(image, 0x2830, 0x3f800000);
nvkm_wo32(image, 0x285c, 0x40000000);
nvkm_wo32(image, 0x2860, 0x3f800000);
nvkm_wo32(image, 0x2864, 0x3f000000);
nvkm_wo32(image, 0x286c, 0x40000000);
nvkm_wo32(image, 0x2870, 0x3f800000);
nvkm_wo32(image, 0x2878, 0xbf800000);
nvkm_wo32(image, 0x2880, 0xbf800000);
nvkm_wo32(image, 0x34a4, 0x000fe000);
nvkm_wo32(image, 0x3530, 0x000003f8);
nvkm_wo32(image, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
nvkm_wo32(image, i, 0x001c527c);
nvkm_done(image);
return 0;
}
int
nv20_gr_context_init(struct nvkm_object *object)
nv20_gr_chan_init(struct nvkm_object *object)
{
struct nv20_gr *gr = (void *)object->engine;
struct nv20_gr_chan *chan = (void *)object;
int ret;
ret = nvkm_gr_context_init(&chan->base);
if (ret)
return ret;
struct nv20_gr_chan *chan = nv20_gr_chan(object);
struct nv20_gr *gr = chan->gr;
u32 inst = nvkm_memory_addr(chan->inst);
nvkm_kmap(gr->ctxtab);
nvkm_wo32(gr->ctxtab, chan->chid * 4, nv_gpuobj(chan)->addr >> 4);
nvkm_wo32(gr->ctxtab, chan->chid * 4, inst >> 4);
nvkm_done(gr->ctxtab);
return 0;
}
int
nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
nv20_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv20_gr *gr = (void *)object->engine;
struct nv20_gr_chan *chan = (void *)object;
struct nv20_gr_chan *chan = nv20_gr_chan(object);
struct nv20_gr *gr = chan->gr;
struct nvkm_device *device = gr->base.engine.subdev.device;
u32 inst = nvkm_memory_addr(chan->inst);
int chid = -1;
nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
if (nvkm_rd32(device, 0x400144) & 0x00010000)
chid = (nvkm_rd32(device, 0x400148) & 0x1f000000) >> 24;
if (chan->chid == chid) {
nvkm_wr32(device, 0x400784, nv_gpuobj(chan)->addr >> 4);
nvkm_wr32(device, 0x400784, inst >> 4);
nvkm_wr32(device, 0x400788, 0x00000002);
nvkm_msec(device, 2000,
if (!nvkm_rd32(device, 0x400700))
@ -145,23 +51,94 @@ nv20_gr_context_fini(struct nvkm_object *object, bool suspend)
nvkm_kmap(gr->ctxtab);
nvkm_wo32(gr->ctxtab, chan->chid * 4, 0x00000000);
nvkm_done(gr->ctxtab);
return nvkm_gr_context_fini(&chan->base, suspend);
return 0;
}
static struct nvkm_oclass
nv20_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x20),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv20_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
void *
nv20_gr_chan_dtor(struct nvkm_object *object)
{
struct nv20_gr_chan *chan = nv20_gr_chan(object);
nvkm_memory_del(&chan->inst);
return chan;
}
static const struct nvkm_object_func
nv20_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv20_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
int ret, i;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv20_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x37f0, 16, true,
&chan->inst);
if (ret)
return ret;
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
nvkm_wo32(chan->inst, 0x047c, 0x00000101);
nvkm_wo32(chan->inst, 0x0490, 0x00000111);
nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
nvkm_wo32(chan->inst, 0x0604, 0x00004000);
nvkm_wo32(chan->inst, 0x0610, 0x00000001);
nvkm_wo32(chan->inst, 0x0618, 0x00040000);
nvkm_wo32(chan->inst, 0x061c, 0x00010000);
for (i = 0x1c1c; i <= 0x248c; i += 16) {
nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
nvkm_wo32(chan->inst, 0x281c, 0x3f800000);
nvkm_wo32(chan->inst, 0x2830, 0x3f800000);
nvkm_wo32(chan->inst, 0x285c, 0x40000000);
nvkm_wo32(chan->inst, 0x2860, 0x3f800000);
nvkm_wo32(chan->inst, 0x2864, 0x3f000000);
nvkm_wo32(chan->inst, 0x286c, 0x40000000);
nvkm_wo32(chan->inst, 0x2870, 0x3f800000);
nvkm_wo32(chan->inst, 0x2878, 0xbf800000);
nvkm_wo32(chan->inst, 0x2880, 0xbf800000);
nvkm_wo32(chan->inst, 0x34a4, 0x000fe000);
nvkm_wo32(chan->inst, 0x3530, 0x000003f8);
nvkm_wo32(chan->inst, 0x3540, 0x002fe000);
for (i = 0x355c; i <= 0x3578; i += 4)
nvkm_wo32(chan->inst, i, 0x001c527c);
nvkm_done(chan->inst);
return 0;
}
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@ -176,7 +153,7 @@ nv20_gr_tile_prog(struct nvkm_engine *engine, int i)
unsigned long flags;
fifo->pause(fifo, &flags);
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
nvkm_wr32(device, NV20_PGRAPH_TLIMIT(i), tile->limit);
nvkm_wr32(device, NV20_PGRAPH_TSIZE(i), tile->pitch);
@ -237,6 +214,29 @@ nv20_gr_intr(struct nvkm_subdev *subdev)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
static const struct nvkm_gr_func
nv20_gr = {
.chan_new = nv20_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{ -1, -1, 0x0097, &nv04_gr_object }, /* kelvin */
{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{}
}
};
static int
nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -251,6 +251,8 @@ nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv20_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -258,8 +260,6 @@ nv20_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv20_gr_cclass;
nv_engine(gr)->sclass = nv20_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}
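On nv20 the context image moves out of the engine-context gpuobj into an explicitly allocated instance-memory object, which the init/fini hooks then reference via nvkm_memory_addr(). The condensed sketch below is not compilable on its own and uses only calls that appear in the hunks above; the 0x37f0 size and offsets come from the diff, error handling and most of the image writes are trimmed, and the demo_* function names are assumptions.

static int
demo_nv20_ctx_new(struct nvkm_device *device, int chid,
		  struct nvkm_memory **pinst)
{
	/* Allocate the context image in instance memory, as nv20_gr_chan_new()
	 * does above, then pre-seed a couple of words. */
	int ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
				  0x37f0, 16, true, pinst);
	if (ret)
		return ret;

	nvkm_kmap(*pinst);
	nvkm_wo32(*pinst, 0x0000, 0x00000001 | (chid << 24)); /* owner tag */
	nvkm_wo32(*pinst, 0x033c, 0xffff0000);
	nvkm_done(*pinst);
	return 0;
}

static void
demo_nv20_ctx_del(struct nvkm_memory **pinst)
{
	nvkm_memory_del(pinst); /* as nv20_gr_chan_dtor() does above */
}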

View File

@ -1,20 +1,25 @@
#ifndef __NV20_GR_H__
#define __NV20_GR_H__
#include <engine/gr.h>
#define nv20_gr(p) container_of((p), struct nv20_gr, base)
#include "priv.h"
struct nv20_gr {
struct nvkm_gr base;
struct nvkm_memory *ctxtab;
};
#define nv20_gr_chan(p) container_of((p), struct nv20_gr_chan, object)
struct nv20_gr_chan {
struct nvkm_gr_chan base;
struct nvkm_object object;
struct nv20_gr *gr;
int chid;
struct nvkm_memory *inst;
};
extern struct nvkm_oclass nv25_gr_sclass[];
int nv20_gr_context_init(struct nvkm_object *);
int nv20_gr_context_fini(struct nvkm_object *, bool);
void *nv20_gr_chan_dtor(struct nvkm_object *);
int nv20_gr_chan_init(struct nvkm_object *);
int nv20_gr_chan_fini(struct nvkm_object *, bool);
void nv20_gr_tile_prog(struct nvkm_engine *, int);
void nv20_gr_intr(struct nvkm_subdev *);

View File

@ -4,127 +4,123 @@
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
struct nvkm_oclass
nv25_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x0096, &nv04_gr_ofuncs, NULL }, /* celcius */
{ 0x009e, &nv04_gr_ofuncs, NULL }, /* swzsurf */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0597, &nv04_gr_ofuncs, NULL }, /* kelvin */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv25_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv25_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv25_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x3724,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv25_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x3724, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x035c, 0xffff0000);
nvkm_wo32(image, 0x03c0, 0x0fff0000);
nvkm_wo32(image, 0x03c4, 0x0fff0000);
nvkm_wo32(image, 0x049c, 0x00000101);
nvkm_wo32(image, 0x04b0, 0x00000111);
nvkm_wo32(image, 0x04c8, 0x00000080);
nvkm_wo32(image, 0x04cc, 0xffff0000);
nvkm_wo32(image, 0x04d0, 0x00000001);
nvkm_wo32(image, 0x04e4, 0x44400000);
nvkm_wo32(image, 0x04fc, 0x4b800000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x035c, 0xffff0000);
nvkm_wo32(chan->inst, 0x03c0, 0x0fff0000);
nvkm_wo32(chan->inst, 0x03c4, 0x0fff0000);
nvkm_wo32(chan->inst, 0x049c, 0x00000101);
nvkm_wo32(chan->inst, 0x04b0, 0x00000111);
nvkm_wo32(chan->inst, 0x04c8, 0x00000080);
nvkm_wo32(chan->inst, 0x04cc, 0xffff0000);
nvkm_wo32(chan->inst, 0x04d0, 0x00000001);
nvkm_wo32(chan->inst, 0x04e4, 0x44400000);
nvkm_wo32(chan->inst, 0x04fc, 0x4b800000);
for (i = 0x0510; i <= 0x051c; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x0530; i <= 0x053c; i += 4)
nvkm_wo32(image, i, 0x00080000);
nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x0548; i <= 0x0554; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0558; i <= 0x0564; i += 4)
nvkm_wo32(image, i, 0x000105b8);
nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x0568; i <= 0x0574; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x0598; i <= 0x05d4; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x05e0, 0x4b7fffff);
nvkm_wo32(image, 0x0620, 0x00000080);
nvkm_wo32(image, 0x0624, 0x30201000);
nvkm_wo32(image, 0x0628, 0x70605040);
nvkm_wo32(image, 0x062c, 0xb0a09080);
nvkm_wo32(image, 0x0630, 0xf0e0d0c0);
nvkm_wo32(image, 0x0664, 0x00000001);
nvkm_wo32(image, 0x066c, 0x00004000);
nvkm_wo32(image, 0x0678, 0x00000001);
nvkm_wo32(image, 0x0680, 0x00040000);
nvkm_wo32(image, 0x0684, 0x00010000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x05e0, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x0620, 0x00000080);
nvkm_wo32(chan->inst, 0x0624, 0x30201000);
nvkm_wo32(chan->inst, 0x0628, 0x70605040);
nvkm_wo32(chan->inst, 0x062c, 0xb0a09080);
nvkm_wo32(chan->inst, 0x0630, 0xf0e0d0c0);
nvkm_wo32(chan->inst, 0x0664, 0x00000001);
nvkm_wo32(chan->inst, 0x066c, 0x00004000);
nvkm_wo32(chan->inst, 0x0678, 0x00000001);
nvkm_wo32(chan->inst, 0x0680, 0x00040000);
nvkm_wo32(chan->inst, 0x0684, 0x00010000);
for (i = 0x1b04; i <= 0x2374; i += 16) {
nvkm_wo32(image, (i + 0), 0x10700ff9);
nvkm_wo32(image, (i + 4), 0x0436086c);
nvkm_wo32(image, (i + 8), 0x000c001b);
nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
nvkm_wo32(image, 0x2704, 0x3f800000);
nvkm_wo32(image, 0x2718, 0x3f800000);
nvkm_wo32(image, 0x2744, 0x40000000);
nvkm_wo32(image, 0x2748, 0x3f800000);
nvkm_wo32(image, 0x274c, 0x3f000000);
nvkm_wo32(image, 0x2754, 0x40000000);
nvkm_wo32(image, 0x2758, 0x3f800000);
nvkm_wo32(image, 0x2760, 0xbf800000);
nvkm_wo32(image, 0x2768, 0xbf800000);
nvkm_wo32(image, 0x308c, 0x000fe000);
nvkm_wo32(image, 0x3108, 0x000003f8);
nvkm_wo32(image, 0x3468, 0x002fe000);
nvkm_wo32(chan->inst, 0x2704, 0x3f800000);
nvkm_wo32(chan->inst, 0x2718, 0x3f800000);
nvkm_wo32(chan->inst, 0x2744, 0x40000000);
nvkm_wo32(chan->inst, 0x2748, 0x3f800000);
nvkm_wo32(chan->inst, 0x274c, 0x3f000000);
nvkm_wo32(chan->inst, 0x2754, 0x40000000);
nvkm_wo32(chan->inst, 0x2758, 0x3f800000);
nvkm_wo32(chan->inst, 0x2760, 0xbf800000);
nvkm_wo32(chan->inst, 0x2768, 0xbf800000);
nvkm_wo32(chan->inst, 0x308c, 0x000fe000);
nvkm_wo32(chan->inst, 0x3108, 0x000003f8);
nvkm_wo32(chan->inst, 0x3468, 0x002fe000);
for (i = 0x3484; i <= 0x34a0; i += 4)
nvkm_wo32(image, i, 0x001c527c);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x001c527c);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv25_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x25),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv25_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv25_gr = {
.chan_new = nv25_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
static int
nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -139,6 +135,8 @@ nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv25_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -146,8 +144,6 @@ nv25_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv25_gr_cclass;
nv_engine(gr)->sclass = nv25_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}

View File

@ -8,90 +8,110 @@
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv2a_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv2a_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv2a_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x36b0,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv2a_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x36b0, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x033c, 0xffff0000);
nvkm_wo32(image, 0x03a0, 0x0fff0000);
nvkm_wo32(image, 0x03a4, 0x0fff0000);
nvkm_wo32(image, 0x047c, 0x00000101);
nvkm_wo32(image, 0x0490, 0x00000111);
nvkm_wo32(image, 0x04a8, 0x44400000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0000, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x033c, 0xffff0000);
nvkm_wo32(chan->inst, 0x03a0, 0x0fff0000);
nvkm_wo32(chan->inst, 0x03a4, 0x0fff0000);
nvkm_wo32(chan->inst, 0x047c, 0x00000101);
nvkm_wo32(chan->inst, 0x0490, 0x00000111);
nvkm_wo32(chan->inst, 0x04a8, 0x44400000);
for (i = 0x04d4; i <= 0x04e0; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x04f4; i <= 0x0500; i += 4)
nvkm_wo32(image, i, 0x00080000);
nvkm_wo32(chan->inst, i, 0x00080000);
for (i = 0x050c; i <= 0x0518; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x051c; i <= 0x0528; i += 4)
nvkm_wo32(image, i, 0x000105b8);
nvkm_wo32(chan->inst, i, 0x000105b8);
for (i = 0x052c; i <= 0x0538; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(chan->inst, i, 0x00080008);
for (i = 0x055c; i <= 0x0598; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x05a4, 0x4b7fffff);
nvkm_wo32(image, 0x05fc, 0x00000001);
nvkm_wo32(image, 0x0604, 0x00004000);
nvkm_wo32(image, 0x0610, 0x00000001);
nvkm_wo32(image, 0x0618, 0x00040000);
nvkm_wo32(image, 0x061c, 0x00010000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x05a4, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x05fc, 0x00000001);
nvkm_wo32(chan->inst, 0x0604, 0x00004000);
nvkm_wo32(chan->inst, 0x0610, 0x00000001);
nvkm_wo32(chan->inst, 0x0618, 0x00040000);
nvkm_wo32(chan->inst, 0x061c, 0x00010000);
for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
nvkm_wo32(image, (i + 0), 0x10700ff9);
nvkm_wo32(image, (i + 4), 0x0436086c);
nvkm_wo32(image, (i + 8), 0x000c001b);
nvkm_wo32(chan->inst, (i + 0), 0x10700ff9);
nvkm_wo32(chan->inst, (i + 4), 0x0436086c);
nvkm_wo32(chan->inst, (i + 8), 0x000c001b);
}
nvkm_wo32(image, 0x269c, 0x3f800000);
nvkm_wo32(image, 0x26b0, 0x3f800000);
nvkm_wo32(image, 0x26dc, 0x40000000);
nvkm_wo32(image, 0x26e0, 0x3f800000);
nvkm_wo32(image, 0x26e4, 0x3f000000);
nvkm_wo32(image, 0x26ec, 0x40000000);
nvkm_wo32(image, 0x26f0, 0x3f800000);
nvkm_wo32(image, 0x26f8, 0xbf800000);
nvkm_wo32(image, 0x2700, 0xbf800000);
nvkm_wo32(image, 0x3024, 0x000fe000);
nvkm_wo32(image, 0x30a0, 0x000003f8);
nvkm_wo32(image, 0x33fc, 0x002fe000);
nvkm_wo32(chan->inst, 0x269c, 0x3f800000);
nvkm_wo32(chan->inst, 0x26b0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26dc, 0x40000000);
nvkm_wo32(chan->inst, 0x26e0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26e4, 0x3f000000);
nvkm_wo32(chan->inst, 0x26ec, 0x40000000);
nvkm_wo32(chan->inst, 0x26f0, 0x3f800000);
nvkm_wo32(chan->inst, 0x26f8, 0xbf800000);
nvkm_wo32(chan->inst, 0x2700, 0xbf800000);
nvkm_wo32(chan->inst, 0x3024, 0x000fe000);
nvkm_wo32(chan->inst, 0x30a0, 0x000003f8);
nvkm_wo32(chan->inst, 0x33fc, 0x002fe000);
for (i = 0x341c; i <= 0x3438; i += 4)
nvkm_wo32(image, i, 0x001c527c);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x001c527c);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv2a_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x2a),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv2a_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv2a_gr = {
.chan_new = nv2a_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x0096, &nv04_gr_object }, /* celcius */
{ -1, -1, 0x009e, &nv04_gr_object }, /* swzsurf */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0597, &nv04_gr_object }, /* kelvin */
{}
}
};
static int
nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -106,6 +126,8 @@ nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv2a_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -113,8 +135,6 @@ nv2a_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv2a_gr_cclass;
nv_engine(gr)->sclass = nv25_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}

View File: nvkm/engine/gr/nv30.c

@ -5,128 +5,124 @@
#include <engine/fifo/chan.h>
#include <subdev/fb.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
nv30_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0397, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv30_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x5f48,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x0410, 0x00000101);
nvkm_wo32(image, 0x0424, 0x00000111);
nvkm_wo32(image, 0x0428, 0x00000060);
nvkm_wo32(image, 0x0444, 0x00000080);
nvkm_wo32(image, 0x0448, 0xffff0000);
nvkm_wo32(image, 0x044c, 0x00000001);
nvkm_wo32(image, 0x0460, 0x44400000);
nvkm_wo32(image, 0x048c, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x0410, 0x00000101);
nvkm_wo32(chan->inst, 0x0424, 0x00000111);
nvkm_wo32(chan->inst, 0x0428, 0x00000060);
nvkm_wo32(chan->inst, 0x0444, 0x00000080);
nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
nvkm_wo32(chan->inst, 0x044c, 0x00000001);
nvkm_wo32(chan->inst, 0x0460, 0x44400000);
nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
for (i = 0x04e0; i < 0x04e8; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04ec, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
for (i = 0x0508; i < 0x0548; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x0550, 0x4b7fffff);
nvkm_wo32(image, 0x058c, 0x00000080);
nvkm_wo32(image, 0x0590, 0x30201000);
nvkm_wo32(image, 0x0594, 0x70605040);
nvkm_wo32(image, 0x0598, 0xb8a89888);
nvkm_wo32(image, 0x059c, 0xf8e8d8c8);
nvkm_wo32(image, 0x05b0, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x058c, 0x00000080);
nvkm_wo32(chan->inst, 0x0590, 0x30201000);
nvkm_wo32(chan->inst, 0x0594, 0x70605040);
nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
for (i = 0x0600; i < 0x0640; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0640; i < 0x0680; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c0; i < 0x0700; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0700; i < 0x0740; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0740; i < 0x0780; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x085c, 0x00040000);
nvkm_wo32(image, 0x0860, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x085c, 0x00040000);
nvkm_wo32(chan->inst, 0x0860, 0x00010000);
for (i = 0x0864; i < 0x0874; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f18; i <= 0x3088 ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 1, 0x0436086c);
nvkm_wo32(image, i + 2, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 1, 0x0436086c);
nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x30b8; i < 0x30c8; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x344c, 0x3f800000);
nvkm_wo32(image, 0x3808, 0x3f800000);
nvkm_wo32(image, 0x381c, 0x3f800000);
nvkm_wo32(image, 0x3848, 0x40000000);
nvkm_wo32(image, 0x384c, 0x3f800000);
nvkm_wo32(image, 0x3850, 0x3f000000);
nvkm_wo32(image, 0x3858, 0x40000000);
nvkm_wo32(image, 0x385c, 0x3f800000);
nvkm_wo32(image, 0x3864, 0xbf800000);
nvkm_wo32(image, 0x386c, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3848, 0x40000000);
nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
nvkm_wo32(chan->inst, 0x3858, 0x40000000);
nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv30_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x30),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv30_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv30_gr = {
.chan_new = nv30_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0397, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -141,6 +137,8 @@ nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv30_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -148,8 +146,6 @@ nv30_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv30_gr_cclass;
nv_engine(gr)->sclass = nv30_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}

View File: nvkm/engine/gr/nv34.c

@ -4,128 +4,124 @@
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
nv34_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0697, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv34_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x46dc,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x040c, 0x01000101);
nvkm_wo32(image, 0x0420, 0x00000111);
nvkm_wo32(image, 0x0424, 0x00000060);
nvkm_wo32(image, 0x0440, 0x00000080);
nvkm_wo32(image, 0x0444, 0xffff0000);
nvkm_wo32(image, 0x0448, 0x00000001);
nvkm_wo32(image, 0x045c, 0x44400000);
nvkm_wo32(image, 0x0480, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x040c, 0x01000101);
nvkm_wo32(chan->inst, 0x0420, 0x00000111);
nvkm_wo32(chan->inst, 0x0424, 0x00000060);
nvkm_wo32(chan->inst, 0x0440, 0x00000080);
nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
nvkm_wo32(chan->inst, 0x0448, 0x00000001);
nvkm_wo32(chan->inst, 0x045c, 0x44400000);
nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
for (i = 0x04d4; i < 0x04dc; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04e0, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
for (i = 0x04fc; i < 0x053c; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x0544, 0x4b7fffff);
nvkm_wo32(image, 0x057c, 0x00000080);
nvkm_wo32(image, 0x0580, 0x30201000);
nvkm_wo32(image, 0x0584, 0x70605040);
nvkm_wo32(image, 0x0588, 0xb8a89888);
nvkm_wo32(image, 0x058c, 0xf8e8d8c8);
nvkm_wo32(image, 0x05a0, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x057c, 0x00000080);
nvkm_wo32(chan->inst, 0x0580, 0x30201000);
nvkm_wo32(chan->inst, 0x0584, 0x70605040);
nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
for (i = 0x05f0; i < 0x0630; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0630; i < 0x0670; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06b0; i < 0x06f0; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x06f0; i < 0x0730; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0730; i < 0x0770; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x0850, 0x00040000);
nvkm_wo32(image, 0x0854, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x0850, 0x00040000);
nvkm_wo32(chan->inst, 0x0854, 0x00010000);
for (i = 0x0858; i < 0x0868; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x15ac; i <= 0x271c ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 1, 0x0436086c);
nvkm_wo32(image, i + 2, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 1, 0x0436086c);
nvkm_wo32(chan->inst, i + 2, 0x000c001b);
}
for (i = 0x274c; i < 0x275c; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x2ae0, 0x3f800000);
nvkm_wo32(image, 0x2e9c, 0x3f800000);
nvkm_wo32(image, 0x2eb0, 0x3f800000);
nvkm_wo32(image, 0x2edc, 0x40000000);
nvkm_wo32(image, 0x2ee0, 0x3f800000);
nvkm_wo32(image, 0x2ee4, 0x3f000000);
nvkm_wo32(image, 0x2eec, 0x40000000);
nvkm_wo32(image, 0x2ef0, 0x3f800000);
nvkm_wo32(image, 0x2ef8, 0xbf800000);
nvkm_wo32(image, 0x2f00, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv34_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x34),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv34_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv34_gr = {
.chan_new = nv34_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0697, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -140,6 +136,8 @@ nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv34_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -147,8 +145,6 @@ nv34_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv34_gr_cclass;
nv_engine(gr)->sclass = nv34_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}

View File: nvkm/engine/gr/nv35.c

@ -4,128 +4,124 @@
#include <engine/fifo.h>
#include <engine/fifo/chan.h>
/*******************************************************************************
* Graphics object classes
******************************************************************************/
static struct nvkm_oclass
nv35_gr_sclass[] = {
{ 0x0012, &nv04_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv04_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv04_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv04_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv04_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv04_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv04_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv04_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv04_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv04_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv04_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv04_gr_ofuncs, NULL }, /* imageblit */
{ 0x0362, &nv04_gr_ofuncs, NULL }, /* surf2d (nv30) */
{ 0x0389, &nv04_gr_ofuncs, NULL }, /* sifm (nv30) */
{ 0x038a, &nv04_gr_ofuncs, NULL }, /* ifc (nv30) */
{ 0x039e, &nv04_gr_ofuncs, NULL }, /* swzsurf (nv30) */
{ 0x0497, &nv04_gr_ofuncs, NULL }, /* rankine */
{},
};
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static const struct nvkm_object_func
nv35_gr_chan = {
.dtor = nv20_gr_chan_dtor,
.init = nv20_gr_chan_init,
.fini = nv20_gr_chan_fini,
};
static int
nv35_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv35_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv20_gr *gr = nv20_gr(base);
struct nv20_gr_chan *chan;
struct nvkm_gpuobj *image;
int ret, i;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, 0x577c,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv35_gr_chan, oclass, &chan->object);
chan->gr = gr;
chan->chid = fifoch->chid;
*pobject = &chan->object;
ret = nvkm_memory_new(gr->base.engine.subdev.device,
NVKM_MEM_TARGET_INST, 0x577c, 16, true,
&chan->inst);
if (ret)
return ret;
chan->chid = nvkm_fifo_chan(parent)->chid;
image = &chan->base.base.gpuobj;
nvkm_kmap(image);
nvkm_wo32(image, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(image, 0x040c, 0x00000101);
nvkm_wo32(image, 0x0420, 0x00000111);
nvkm_wo32(image, 0x0424, 0x00000060);
nvkm_wo32(image, 0x0440, 0x00000080);
nvkm_wo32(image, 0x0444, 0xffff0000);
nvkm_wo32(image, 0x0448, 0x00000001);
nvkm_wo32(image, 0x045c, 0x44400000);
nvkm_wo32(image, 0x0488, 0xffff0000);
nvkm_kmap(chan->inst);
nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
nvkm_wo32(chan->inst, 0x040c, 0x00000101);
nvkm_wo32(chan->inst, 0x0420, 0x00000111);
nvkm_wo32(chan->inst, 0x0424, 0x00000060);
nvkm_wo32(chan->inst, 0x0440, 0x00000080);
nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
nvkm_wo32(chan->inst, 0x0448, 0x00000001);
nvkm_wo32(chan->inst, 0x045c, 0x44400000);
nvkm_wo32(chan->inst, 0x0488, 0xffff0000);
for (i = 0x04dc; i < 0x04e4; i += 4)
nvkm_wo32(image, i, 0x0fff0000);
nvkm_wo32(image, 0x04e8, 0x00011100);
nvkm_wo32(chan->inst, i, 0x0fff0000);
nvkm_wo32(chan->inst, 0x04e8, 0x00011100);
for (i = 0x0504; i < 0x0544; i += 4)
nvkm_wo32(image, i, 0x07ff0000);
nvkm_wo32(image, 0x054c, 0x4b7fffff);
nvkm_wo32(image, 0x0588, 0x00000080);
nvkm_wo32(image, 0x058c, 0x30201000);
nvkm_wo32(image, 0x0590, 0x70605040);
nvkm_wo32(image, 0x0594, 0xb8a89888);
nvkm_wo32(image, 0x0598, 0xf8e8d8c8);
nvkm_wo32(image, 0x05ac, 0xb0000000);
nvkm_wo32(chan->inst, i, 0x07ff0000);
nvkm_wo32(chan->inst, 0x054c, 0x4b7fffff);
nvkm_wo32(chan->inst, 0x0588, 0x00000080);
nvkm_wo32(chan->inst, 0x058c, 0x30201000);
nvkm_wo32(chan->inst, 0x0590, 0x70605040);
nvkm_wo32(chan->inst, 0x0594, 0xb8a89888);
nvkm_wo32(chan->inst, 0x0598, 0xf8e8d8c8);
nvkm_wo32(chan->inst, 0x05ac, 0xb0000000);
for (i = 0x0604; i < 0x0644; i += 4)
nvkm_wo32(image, i, 0x00010588);
nvkm_wo32(chan->inst, i, 0x00010588);
for (i = 0x0644; i < 0x0684; i += 4)
nvkm_wo32(image, i, 0x00030303);
nvkm_wo32(chan->inst, i, 0x00030303);
for (i = 0x06c4; i < 0x0704; i += 4)
nvkm_wo32(image, i, 0x0008aae4);
nvkm_wo32(chan->inst, i, 0x0008aae4);
for (i = 0x0704; i < 0x0744; i += 4)
nvkm_wo32(image, i, 0x01012000);
nvkm_wo32(chan->inst, i, 0x01012000);
for (i = 0x0744; i < 0x0784; i += 4)
nvkm_wo32(image, i, 0x00080008);
nvkm_wo32(image, 0x0860, 0x00040000);
nvkm_wo32(image, 0x0864, 0x00010000);
nvkm_wo32(chan->inst, i, 0x00080008);
nvkm_wo32(chan->inst, 0x0860, 0x00040000);
nvkm_wo32(chan->inst, 0x0864, 0x00010000);
for (i = 0x0868; i < 0x0878; i += 4)
nvkm_wo32(image, i, 0x00040004);
nvkm_wo32(chan->inst, i, 0x00040004);
for (i = 0x1f1c; i <= 0x308c ; i += 16) {
nvkm_wo32(image, i + 0, 0x10700ff9);
nvkm_wo32(image, i + 4, 0x0436086c);
nvkm_wo32(image, i + 8, 0x000c001b);
nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
nvkm_wo32(chan->inst, i + 4, 0x0436086c);
nvkm_wo32(chan->inst, i + 8, 0x000c001b);
}
for (i = 0x30bc; i < 0x30cc; i += 4)
nvkm_wo32(image, i, 0x0000ffff);
nvkm_wo32(image, 0x3450, 0x3f800000);
nvkm_wo32(image, 0x380c, 0x3f800000);
nvkm_wo32(image, 0x3820, 0x3f800000);
nvkm_wo32(image, 0x384c, 0x40000000);
nvkm_wo32(image, 0x3850, 0x3f800000);
nvkm_wo32(image, 0x3854, 0x3f000000);
nvkm_wo32(image, 0x385c, 0x40000000);
nvkm_wo32(image, 0x3860, 0x3f800000);
nvkm_wo32(image, 0x3868, 0xbf800000);
nvkm_wo32(image, 0x3870, 0xbf800000);
nvkm_done(image);
nvkm_wo32(chan->inst, i, 0x0000ffff);
nvkm_wo32(chan->inst, 0x3450, 0x3f800000);
nvkm_wo32(chan->inst, 0x380c, 0x3f800000);
nvkm_wo32(chan->inst, 0x3820, 0x3f800000);
nvkm_wo32(chan->inst, 0x384c, 0x40000000);
nvkm_wo32(chan->inst, 0x3850, 0x3f800000);
nvkm_wo32(chan->inst, 0x3854, 0x3f000000);
nvkm_wo32(chan->inst, 0x385c, 0x40000000);
nvkm_wo32(chan->inst, 0x3860, 0x3f800000);
nvkm_wo32(chan->inst, 0x3868, 0xbf800000);
nvkm_wo32(chan->inst, 0x3870, 0xbf800000);
nvkm_done(chan->inst);
return 0;
}
static struct nvkm_oclass
nv35_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x35),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv35_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = nv20_gr_context_init,
.fini = nv20_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
};
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
static const struct nvkm_gr_func
nv35_gr = {
.chan_new = nv35_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv04_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv04_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv04_gr_object }, /* null */
{ -1, -1, 0x0039, &nv04_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv04_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv04_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv04_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv04_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv04_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv04_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv04_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv04_gr_object }, /* imageblit */
{ -1, -1, 0x0362, &nv04_gr_object }, /* surf2d (nv30) */
{ -1, -1, 0x0389, &nv04_gr_object }, /* sifm (nv30) */
{ -1, -1, 0x038a, &nv04_gr_object }, /* ifc (nv30) */
{ -1, -1, 0x039e, &nv04_gr_object }, /* swzsurf (nv30) */
{ -1, -1, 0x0497, &nv04_gr_object }, /* rankine */
{}
}
};
static int
nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -140,6 +136,8 @@ nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
gr->base.func = &nv35_gr;
ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 32 * 4, 16, true,
&gr->ctxtab);
if (ret)
@ -147,8 +145,6 @@ nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv20_gr_intr;
nv_engine(gr)->cclass = &nv35_gr_cclass;
nv_engine(gr)->sclass = nv35_gr_sclass;
nv_engine(gr)->tile_prog = nv20_gr_tile_prog;
return 0;
}

View File: nvkm/engine/gr/nv40.c

@ -29,19 +29,6 @@
#include <subdev/timer.h>
#include <engine/fifo.h>
struct nv40_gr {
struct nvkm_gr base;
u32 size;
struct list_head chan;
};
struct nv40_gr_chan {
struct nvkm_gr_chan base;
struct nvkm_fifo_chan *fifo;
u32 inst;
struct list_head head;
};
static u64
nv40_gr_units(struct nvkm_gr *gr)
{
@ -53,133 +40,61 @@ nv40_gr_units(struct nvkm_gr *gr)
******************************************************************************/
static int
nv40_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *obj;
int ret;
ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
20, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
nvkm_kmap(obj);
nvkm_wo32(obj, 0x00, nv_mclass(obj));
nvkm_wo32(obj, 0x04, 0x00000000);
nvkm_wo32(obj, 0x08, 0x00000000);
int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, object->oclass_name);
nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
nvkm_mo32(obj, 0x08, 0x01000000, 0x01000000);
nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
nvkm_wo32(obj, 0x0c, 0x00000000);
nvkm_wo32(obj, 0x10, 0x00000000);
nvkm_done(obj);
return 0;
nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
nvkm_done(*pgpuobj);
}
return ret;
}
static struct nvkm_ofuncs
nv40_gr_ofuncs = {
.ctor = nv40_gr_object_ctor,
.dtor = _nvkm_gpuobj_dtor,
.init = _nvkm_gpuobj_init,
.fini = _nvkm_gpuobj_fini,
.rd32 = _nvkm_gpuobj_rd32,
.wr32 = _nvkm_gpuobj_wr32,
};
static struct nvkm_oclass
nv40_gr_sclass[] = {
{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
{ 0x4097, &nv40_gr_ofuncs, NULL }, /* curie */
{},
};
static struct nvkm_oclass
nv44_gr_sclass[] = {
{ 0x0012, &nv40_gr_ofuncs, NULL }, /* beta1 */
{ 0x0019, &nv40_gr_ofuncs, NULL }, /* clip */
{ 0x0030, &nv40_gr_ofuncs, NULL }, /* null */
{ 0x0039, &nv40_gr_ofuncs, NULL }, /* m2mf */
{ 0x0043, &nv40_gr_ofuncs, NULL }, /* rop */
{ 0x0044, &nv40_gr_ofuncs, NULL }, /* patt */
{ 0x004a, &nv40_gr_ofuncs, NULL }, /* gdi */
{ 0x0062, &nv40_gr_ofuncs, NULL }, /* surf2d */
{ 0x0072, &nv40_gr_ofuncs, NULL }, /* beta4 */
{ 0x0089, &nv40_gr_ofuncs, NULL }, /* sifm */
{ 0x008a, &nv40_gr_ofuncs, NULL }, /* ifc */
{ 0x009f, &nv40_gr_ofuncs, NULL }, /* imageblit */
{ 0x3062, &nv40_gr_ofuncs, NULL }, /* surf2d (nv40) */
{ 0x3089, &nv40_gr_ofuncs, NULL }, /* sifm (nv40) */
{ 0x309e, &nv40_gr_ofuncs, NULL }, /* swzsurf (nv40) */
{ 0x4497, &nv40_gr_ofuncs, NULL }, /* curie */
{},
static const struct nvkm_object_func
nv40_gr_object = {
.bind = nv40_gr_object_bind,
};
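Here the per-class ctor/ofuncs pair is replaced by a bare bind() hook: the software object is only written into channel instance memory when something asks for it to be bound. A sketch of such a caller, with the helper name invented and the direct func->bind dispatch being an assumption (the core presumably wraps this in a helper):

static int
example_bind_object(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    struct nvkm_gpuobj **pgpuobj)
{
	if (!object->func->bind)
		return -ENODEV;
	/* 16-byte alignment matches what the old nv40 object ctor requested */
	return object->func->bind(object, parent, 16, pgpuobj);
}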
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static void
nv40_gr_context_dtor(struct nvkm_object *object)
static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv40_gr_chan *chan = (void *)object;
unsigned long flags;
spin_lock_irqsave(&object->engine->lock, flags);
list_del(&chan->head);
spin_unlock_irqrestore(&object->engine->lock, flags);
struct nv40_gr_chan *chan = nv40_gr_chan(object);
struct nv40_gr *gr = chan->gr;
int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
align, true, parent, pgpuobj);
if (ret == 0) {
chan->inst = (*pgpuobj)->addr;
nvkm_kmap(*pgpuobj);
nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
nvkm_done(*pgpuobj);
}
return ret;
}
static int
nv40_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv40_gr_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nv40_gr *gr = (void *)engine;
struct nv40_gr_chan *chan;
unsigned long flags;
int ret;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
16, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
nv40_grctx_fill(nv_device(gr), nv_gpuobj(chan));
nvkm_wo32(&chan->base.base.gpuobj, 0x00000, nv_gpuobj(chan)->addr >> 4);
spin_lock_irqsave(&gr->base.engine.lock, flags);
chan->fifo = (void *)parent;
chan->inst = chan->base.base.gpuobj.addr;
list_add(&chan->head, &gr->chan);
spin_unlock_irqrestore(&gr->base.engine.lock, flags);
return 0;
}
static int
nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
{
struct nv40_gr *gr = (void *)object->engine;
struct nv40_gr_chan *chan = (void *)object;
struct nv40_gr_chan *chan = nv40_gr_chan(object);
struct nv40_gr *gr = chan->gr;
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
u32 inst = 0x01000000 | chan->inst >> 4;
int ret = 0;
nvkm_mask(device, 0x400720, 0x00000001, 0x00000000);
@ -210,19 +125,44 @@ nv40_gr_context_fini(struct nvkm_object *object, bool suspend)
return ret;
}
static struct nvkm_oclass
nv40_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x40),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv40_gr_context_ctor,
.dtor = nv40_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = nv40_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
static void *
nv40_gr_chan_dtor(struct nvkm_object *object)
{
struct nv40_gr_chan *chan = nv40_gr_chan(object);
unsigned long flags;
spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
list_del(&chan->head);
spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
return chan;
}
static const struct nvkm_object_func
nv40_gr_chan = {
.dtor = nv40_gr_chan_dtor,
.fini = nv40_gr_chan_fini,
.bind = nv40_gr_chan_bind,
};
static int
nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv40_gr *gr = nv40_gr(base);
struct nv40_gr_chan *chan;
unsigned long flags;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
chan->gr = gr;
*pobject = &chan->object;
spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
list_add(&chan->head, &gr->chan);
spin_unlock_irqrestore(&chan->gr->base.engine.lock, flags);
return 0;
}
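nv40_gr_chan_new() adds each channel to gr->chan under the engine lock, and nv40_gr_chan_dtor() removes it again, so that a context instance address active in PGRAPH can be traced back to its nv40_gr_chan; the interrupt path needs exactly this kind of reverse lookup. A sketch of such a lookup, with the helper name invented for illustration:

static struct nv40_gr_chan *
example_lookup_chan(struct nv40_gr *gr, u32 inst)
{
	struct nv40_gr_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&gr->base.engine.lock, flags);
	list_for_each_entry(chan, &gr->chan, head) {
		if (chan->inst == inst) {
			spin_unlock_irqrestore(&gr->base.engine.lock, flags);
			return chan;
		}
	}
	spin_unlock_irqrestore(&gr->base.engine.lock, flags);
	return NULL;
}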
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@ -237,7 +177,7 @@ nv40_gr_tile_prog(struct nvkm_engine *engine, int i)
unsigned long flags;
fifo->pause(fifo, &flags);
nv04_gr_idle(gr);
nv04_gr_idle(&gr->base);
switch (nv_device(gr)->chipset) {
case 0x40:
@ -360,6 +300,54 @@ nv40_gr_intr(struct nvkm_subdev *subdev)
spin_unlock_irqrestore(&gr->base.engine.lock, flags);
}
static const struct nvkm_gr_func
nv40_gr = {
.chan_new = nv40_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
{ -1, -1, 0x4097, &nv40_gr_object }, /* curie */
{}
}
};
static const struct nvkm_gr_func
nv44_gr = {
.chan_new = nv40_gr_chan_new,
.sclass = {
{ -1, -1, 0x0012, &nv40_gr_object }, /* beta1 */
{ -1, -1, 0x0019, &nv40_gr_object }, /* clip */
{ -1, -1, 0x0030, &nv40_gr_object }, /* null */
{ -1, -1, 0x0039, &nv40_gr_object }, /* m2mf */
{ -1, -1, 0x0043, &nv40_gr_object }, /* rop */
{ -1, -1, 0x0044, &nv40_gr_object }, /* patt */
{ -1, -1, 0x004a, &nv40_gr_object }, /* gdi */
{ -1, -1, 0x0062, &nv40_gr_object }, /* surf2d */
{ -1, -1, 0x0072, &nv40_gr_object }, /* beta4 */
{ -1, -1, 0x0089, &nv40_gr_object }, /* sifm */
{ -1, -1, 0x008a, &nv40_gr_object }, /* ifc */
{ -1, -1, 0x009f, &nv40_gr_object }, /* imageblit */
{ -1, -1, 0x3062, &nv40_gr_object }, /* surf2d (nv40) */
{ -1, -1, 0x3089, &nv40_gr_object }, /* sifm (nv40) */
{ -1, -1, 0x309e, &nv40_gr_object }, /* swzsurf (nv40) */
{ -1, -1, 0x4497, &nv40_gr_object }, /* curie */
{}
}
};
static int
nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -377,11 +365,10 @@ nv40_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00001000;
nv_subdev(gr)->intr = nv40_gr_intr;
nv_engine(gr)->cclass = &nv40_gr_cclass;
if (nv44_gr_class(gr))
nv_engine(gr)->sclass = nv44_gr_sclass;
gr->base.func = &nv44_gr;
else
nv_engine(gr)->sclass = nv40_gr_sclass;
gr->base.func = &nv40_gr;
nv_engine(gr)->tile_prog = nv40_gr_tile_prog;
gr->base.units = nv40_gr_units;

View File: nvkm/engine/gr/nv40.h

@ -1,8 +1,23 @@
#ifndef __NV40_GR_H__
#define __NV40_GR_H__
#include <engine/gr.h>
#define nv40_gr(p) container_of((p), struct nv40_gr, base)
#include "priv.h"
struct nvkm_gpuobj;
struct nv40_gr {
struct nvkm_gr base;
u32 size;
struct list_head chan;
};
#define nv40_gr_chan(p) container_of((p), struct nv40_gr_chan, object)
struct nv40_gr_chan {
struct nvkm_object object;
struct nv40_gr *gr;
struct nvkm_fifo_chan *fifo;
u32 inst;
struct list_head head;
};
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features

View File: nvkm/engine/gr/nv50.c

@ -27,16 +27,6 @@
#include <subdev/timer.h>
#include <engine/fifo.h>
struct nv50_gr {
struct nvkm_gr base;
spinlock_t lock;
u32 size;
};
struct nv50_gr_chan {
struct nvkm_gr_chan base;
};
static u64
nv50_gr_units(struct nvkm_gr *gr)
{
@ -48,126 +38,82 @@ nv50_gr_units(struct nvkm_gr *gr)
******************************************************************************/
static int
nv50_gr_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv50_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *obj;
int ret;
ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent,
16, 16, 0, &obj);
*pobject = nv_object(obj);
if (ret)
return ret;
nvkm_kmap(obj);
nvkm_wo32(obj, 0x00, nv_mclass(obj));
nvkm_wo32(obj, 0x04, 0x00000000);
nvkm_wo32(obj, 0x08, 0x00000000);
nvkm_wo32(obj, 0x0c, 0x00000000);
nvkm_done(obj);
return 0;
int ret = nvkm_gpuobj_new(object->engine->subdev.device, 16,
align, false, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nvkm_wo32(*pgpuobj, 0x00, object->oclass_name);
nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
nvkm_done(*pgpuobj);
}
return ret;
}
static struct nvkm_ofuncs
nv50_gr_ofuncs = {
.ctor = nv50_gr_object_ctor,
.dtor = _nvkm_gpuobj_dtor,
.init = _nvkm_gpuobj_init,
.fini = _nvkm_gpuobj_fini,
.rd32 = _nvkm_gpuobj_rd32,
.wr32 = _nvkm_gpuobj_wr32,
static const struct nvkm_object_func
nv50_gr_object = {
.bind = nv50_gr_object_bind,
};
static struct nvkm_oclass
nv50_gr_sclass[] = {
{ 0x0030, &nv50_gr_ofuncs },
{ 0x502d, &nv50_gr_ofuncs },
{ 0x5039, &nv50_gr_ofuncs },
{ 0x5097, &nv50_gr_ofuncs },
{ 0x50c0, &nv50_gr_ofuncs },
{}
};
static int
nv50_gr_object_get(struct nvkm_gr *base, int index, struct nvkm_sclass *sclass)
{
struct nv50_gr *gr = nv50_gr(base);
int c = 0;
static struct nvkm_oclass
g84_gr_sclass[] = {
{ 0x0030, &nv50_gr_ofuncs },
{ 0x502d, &nv50_gr_ofuncs },
{ 0x5039, &nv50_gr_ofuncs },
{ 0x50c0, &nv50_gr_ofuncs },
{ 0x8297, &nv50_gr_ofuncs },
{}
};
while (gr->func->sclass[c].oclass) {
if (c++ == index) {
*sclass = gr->func->sclass[index];
return index;
}
}
static struct nvkm_oclass
gt200_gr_sclass[] = {
{ 0x0030, &nv50_gr_ofuncs },
{ 0x502d, &nv50_gr_ofuncs },
{ 0x5039, &nv50_gr_ofuncs },
{ 0x50c0, &nv50_gr_ofuncs },
{ 0x8397, &nv50_gr_ofuncs },
{}
};
static struct nvkm_oclass
gt215_gr_sclass[] = {
{ 0x0030, &nv50_gr_ofuncs },
{ 0x502d, &nv50_gr_ofuncs },
{ 0x5039, &nv50_gr_ofuncs },
{ 0x50c0, &nv50_gr_ofuncs },
{ 0x8597, &nv50_gr_ofuncs },
{ 0x85c0, &nv50_gr_ofuncs },
{}
};
static struct nvkm_oclass
mcp89_gr_sclass[] = {
{ 0x0030, &nv50_gr_ofuncs },
{ 0x502d, &nv50_gr_ofuncs },
{ 0x5039, &nv50_gr_ofuncs },
{ 0x50c0, &nv50_gr_ofuncs },
{ 0x85c0, &nv50_gr_ofuncs },
{ 0x8697, &nv50_gr_ofuncs },
{}
};
return c;
}
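nv50_gr_object_get() walks the chipset-specific nv50_gr_func class table: it fills *sclass and echoes the index while the index is in range, and returns the total entry count once it runs off the end. An illustrative consumer, not part of this change (pr_debug and the zero-initialised sclass are only for the sketch):

static void
example_dump_classes(struct nvkm_gr *gr)
{
	struct nvkm_sclass sclass = {};
	int i = 0;

	if (!gr->func->object_get)
		return;

	/* stops once the hook no longer fills in a class for index i */
	while (gr->func->object_get(gr, i, &sclass) == i && sclass.oclass) {
		pr_debug("gr: class %04x supported\n", sclass.oclass);
		sclass.oclass = 0;
		i++;
	}
}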
/*******************************************************************************
* PGRAPH context
******************************************************************************/
static int
nv50_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
nv50_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
int align, struct nvkm_gpuobj **pgpuobj)
{
struct nv50_gr *gr = (void *)engine;
struct nv50_gr_chan *chan;
int ret;
ret = nvkm_gr_context_create(parent, engine, oclass, NULL, gr->size,
0, NVOBJ_FLAG_ZERO_ALLOC, &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
nv50_grctx_fill(nv_device(gr), nv_gpuobj(chan));
return 0;
struct nv50_gr *gr = nv50_gr_chan(object)->gr;
int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
align, true, parent, pgpuobj);
if (ret == 0) {
nvkm_kmap(*pgpuobj);
nv50_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
nvkm_done(*pgpuobj);
}
return ret;
}
static struct nvkm_oclass
nv50_gr_cclass = {
.handle = NV_ENGCTX(GR, 0x50),
.ofuncs = &(struct nvkm_ofuncs) {
.ctor = nv50_gr_context_ctor,
.dtor = _nvkm_gr_context_dtor,
.init = _nvkm_gr_context_init,
.fini = _nvkm_gr_context_fini,
.rd32 = _nvkm_gr_context_rd32,
.wr32 = _nvkm_gr_context_wr32,
},
static const struct nvkm_object_func
nv50_gr_chan = {
.bind = nv50_gr_chan_bind,
};
static int
nv50_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
struct nv50_gr *gr = nv50_gr(base);
struct nv50_gr_chan *chan;
if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
return -ENOMEM;
nvkm_object_ctor(&nv50_gr_chan, oclass, &chan->object);
chan->gr = gr;
*pobject = &chan->object;
return 0;
}
/*******************************************************************************
* PGRAPH engine/subdev functions
******************************************************************************/
@ -889,6 +835,74 @@ nv50_gr_intr(struct nvkm_subdev *subdev)
nvkm_fifo_chan_put(device->fifo, flags, &chan);
}
static const struct nv50_gr_func
nv50_gr = {
.sclass = {
{ -1, -1, 0x0030, &nv50_gr_object },
{ -1, -1, 0x502d, &nv50_gr_object },
{ -1, -1, 0x5039, &nv50_gr_object },
{ -1, -1, 0x5097, &nv50_gr_object },
{ -1, -1, 0x50c0, &nv50_gr_object },
{}
}
};
static const struct nv50_gr_func
g84_gr = {
.sclass = {
{ -1, -1, 0x0030, &nv50_gr_object },
{ -1, -1, 0x502d, &nv50_gr_object },
{ -1, -1, 0x5039, &nv50_gr_object },
{ -1, -1, 0x50c0, &nv50_gr_object },
{ -1, -1, 0x8297, &nv50_gr_object },
{}
}
};
static const struct nv50_gr_func
gt200_gr = {
.sclass = {
{ -1, -1, 0x0030, &nv50_gr_object },
{ -1, -1, 0x502d, &nv50_gr_object },
{ -1, -1, 0x5039, &nv50_gr_object },
{ -1, -1, 0x50c0, &nv50_gr_object },
{ -1, -1, 0x8397, &nv50_gr_object },
{}
}
};
static const struct nv50_gr_func
gt215_gr = {
.sclass = {
{ -1, -1, 0x0030, &nv50_gr_object },
{ -1, -1, 0x502d, &nv50_gr_object },
{ -1, -1, 0x5039, &nv50_gr_object },
{ -1, -1, 0x50c0, &nv50_gr_object },
{ -1, -1, 0x8597, &nv50_gr_object },
{ -1, -1, 0x85c0, &nv50_gr_object },
{}
}
};
static const struct nv50_gr_func
mcp89_gr = {
.sclass = {
{ -1, -1, 0x0030, &nv50_gr_object },
{ -1, -1, 0x502d, &nv50_gr_object },
{ -1, -1, 0x5039, &nv50_gr_object },
{ -1, -1, 0x50c0, &nv50_gr_object },
{ -1, -1, 0x85c0, &nv50_gr_object },
{ -1, -1, 0x8697, &nv50_gr_object },
{}
}
};
static const struct nvkm_gr_func
nv50_gr_ = {
.chan_new = nv50_gr_chan_new,
.object_get = nv50_gr_object_get,
};
static int
nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, void *data, u32 size,
@ -904,13 +918,13 @@ nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_subdev(gr)->unit = 0x00201000;
nv_subdev(gr)->intr = nv50_gr_intr;
nv_engine(gr)->cclass = &nv50_gr_cclass;
gr->base.func = &nv50_gr_;
gr->base.units = nv50_gr_units;
switch (nv_device(gr)->chipset) {
case 0x50:
nv_engine(gr)->sclass = nv50_gr_sclass;
gr->func = &nv50_gr;
break;
case 0x84:
case 0x86:
@ -918,22 +932,21 @@ nv50_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
case 0x94:
case 0x96:
case 0x98:
nv_engine(gr)->sclass = g84_gr_sclass;
gr->func = &g84_gr;
break;
case 0xa0:
case 0xaa:
case 0xac:
nv_engine(gr)->sclass = gt200_gr_sclass;
gr->func = &gt200_gr;
break;
case 0xa3:
case 0xa5:
case 0xa8:
nv_engine(gr)->sclass = gt215_gr_sclass;
gr->func = &gt215_gr;
break;
case 0xaf:
nv_engine(gr)->sclass = mcp89_gr_sclass;
gr->func = &mcp89_gr;
break;
}
/* unfortunate hw bug workaround... */

View File: nvkm/engine/gr/nv50.h

@ -1,8 +1,26 @@
#ifndef __NV50_GR_H__
#define __NV50_GR_H__
#include <engine/gr.h>
struct nvkm_device;
struct nvkm_gpuobj;
#define nv50_gr(p) container_of((p), struct nv50_gr, base)
#include "priv.h"
struct nv50_gr {
struct nvkm_gr base;
const struct nv50_gr_func *func;
spinlock_t lock;
u32 size;
};
struct nv50_gr_func {
void *(*dtor)(struct nv50_gr *);
struct nvkm_sclass sclass[];
};
#define nv50_gr_chan(p) container_of((p), struct nv50_gr_chan, object)
struct nv50_gr_chan {
struct nvkm_object object;
struct nv50_gr *gr;
};
int nv50_grctx_init(struct nvkm_device *, u32 *size);
void nv50_grctx_fill(struct nvkm_device *, struct nvkm_gpuobj *);

View File: nvkm/engine/gr/priv.h

@ -0,0 +1,15 @@
#ifndef __NVKM_GR_PRIV_H__
#define __NVKM_GR_PRIV_H__
#define nvkm_gr(p) container_of((p), struct nvkm_gr, engine)
#include <engine/gr.h>
struct nvkm_fifo_chan;
struct nvkm_gr_func {
int (*chan_new)(struct nvkm_gr *, struct nvkm_fifo_chan *,
const struct nvkm_oclass *, struct nvkm_object **);
int (*object_get)(struct nvkm_gr *, int, struct nvkm_sclass *);
struct nvkm_sclass sclass[];
};
extern const struct nvkm_object_func nv04_gr_object;
#endif
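The new nvkm_gr_func is the hook table the common gr code dispatches through: chan_new() constructs the per-channel context object for a fifo channel, while object_get()/sclass[] enumerate the user-visible classes. A minimal sketch of the expected chan_new dispatch, with the wrapper name and error code being assumptions rather than the in-tree helper:

static int
example_gr_chan_new(struct nvkm_gr *gr, struct nvkm_fifo_chan *fifoch,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
{
	/* a chipset may leave chan_new NULL; guard before dispatching */
	if (!gr->func->chan_new)
		return -ENODEV;
	return gr->func->chan_new(gr, fifoch, oclass, pobject);
}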