drm/i915: enumerate scratch fields
We have a bunch of offsets in the scratch buffer. As we're about to add some more, let's group all of the offsets in a common location.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190709123351.5645-6-lionel.g.landwerlin@intel.com
commit 46c5847e3d
parent a5af1df716
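For readers skimming the diff below, the change is mechanical: every caller that used to add a raw byte offset to the scratch base now names the slot via an enum passed to intel_gt_scratch_offset(). A minimal sketch of the calling convention, using only names that appear in this patch (the local variables are illustrative only):

        /* Before this patch: callers open-coded byte offsets into scratch. */
        u32 addr_before = intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES;

        /* After this patch: callers name the field; the helper adds its offset. */
        u32 addr_after = intel_gt_scratch_offset(engine->gt,
                                                 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);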
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -24,9 +24,10 @@ void intel_gt_chipset_flush(struct intel_gt *gt);
 int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
 void intel_gt_fini_scratch(struct intel_gt *gt);
 
-static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt)
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
+                                          enum intel_gt_scratch_field field)
 {
-        return i915_ggtt_offset(gt->scratch);
+        return i915_ggtt_offset(gt->scratch) + field;
 }
 
 #endif /* __INTEL_GT_H__ */
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -60,4 +60,19 @@ struct intel_gt {
         u32 pm_ier;
 };
 
+enum intel_gt_scratch_field {
+        /* 8 bytes */
+        INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,
+
+        /* 8 bytes */
+        INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128,
+
+        /* 8 bytes */
+        INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,
+
+        /* 8 bytes */
+        INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
+
+};
+
 #endif /* __INTEL_GT_TYPES_H__ */
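Editor's note (not part of the patch): the enumerators are byte offsets from the start of the GGTT scratch page, which is why intel_gt_scratch_offset() can simply add the field value to the base. Assuming i915's CACHELINE_BYTES of 64, the old hard-coded `2 * CACHELINE_BYTES` equals the 128 chosen for CLEAR_SLM_WA and RENDER_FLUSH, and the old `scratch + 256` used by the coherent-L3 workaround maps to COHERENTL3_WA = 256. A hypothetical compile-time check (relying on the kernel's static_assert from <linux/build_bug.h>) could document that equivalence:

        /* Hypothetical sanity checks, not in the patch; CACHELINE_BYTES is i915's 64-byte cacheline. */
        static_assert(INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH == 2 * CACHELINE_BYTES);
        static_assert(INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA == 256);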
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1782,7 +1782,8 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
         /* NB no one else is allowed to scribble over scratch + 256! */
         *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
         *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-        *batch++ = intel_gt_scratch_offset(engine->gt) + 256;
+        *batch++ = intel_gt_scratch_offset(engine->gt,
+                                           INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
         *batch++ = 0;
 
         *batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1796,12 +1797,19 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
         *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
         *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-        *batch++ = intel_gt_scratch_offset(engine->gt) + 256;
+        *batch++ = intel_gt_scratch_offset(engine->gt,
+                                           INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
         *batch++ = 0;
 
         return batch;
 }
 
+static u32 slm_offset(struct intel_engine_cs *engine)
+{
+        return intel_gt_scratch_offset(engine->gt,
+                                       INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA);
+}
+
 /*
  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
  * initialized at the beginning and shared across all contexts but this field
@@ -1833,8 +1841,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                        PIPE_CONTROL_GLOBAL_GTT_IVB |
                                        PIPE_CONTROL_CS_STALL |
                                        PIPE_CONTROL_QW_WRITE,
-                                       intel_gt_scratch_offset(engine->gt) +
-                                       2 * CACHELINE_BYTES);
+                                       slm_offset(engine));
 
         *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 
@@ -2528,7 +2535,8 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
         struct intel_engine_cs *engine = request->engine;
         u32 scratch_addr =
-                intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES;
+                intel_gt_scratch_offset(engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
         bool vf_flush_wa = false, dc_flush_wa = false;
         u32 *cs, flags = 0;
         int len;
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -76,7 +76,8 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
         *cs++ = cmd;
         while (num_store_dw--) {
                 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-                *cs++ = intel_gt_scratch_offset(rq->engine->gt);
+                *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                                INTEL_GT_SCRATCH_FIELD_DEFAULT);
                 *cs++ = 0;
         }
         *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -149,7 +150,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
          */
         if (mode & EMIT_INVALIDATE) {
                 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-                *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+                *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                                INTEL_GT_SCRATCH_FIELD_DEFAULT) |
                         PIPE_CONTROL_GLOBAL_GTT;
                 *cs++ = 0;
                 *cs++ = 0;
@@ -158,7 +160,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
         *cs++ = MI_FLUSH;
 
         *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-        *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+        *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_DEFAULT) |
                 PIPE_CONTROL_GLOBAL_GTT;
         *cs++ = 0;
         *cs++ = 0;
@@ -212,7 +215,8 @@ static int
 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
         u32 scratch_addr =
-                intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
+                intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
         u32 *cs;
 
         cs = intel_ring_begin(rq, 6);
@@ -246,7 +250,8 @@ static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
         u32 scratch_addr =
-                intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
+                intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
         u32 *cs, flags = 0;
         int ret;
 
@@ -304,7 +309,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 
         *cs++ = GFX_OP_PIPE_CONTROL(4);
         *cs++ = PIPE_CONTROL_QW_WRITE;
-        *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+        *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_DEFAULT) |
                 PIPE_CONTROL_GLOBAL_GTT;
         *cs++ = 0;
 
@@ -349,7 +355,8 @@ static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
         u32 scratch_addr =
-                intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
+                intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
         u32 *cs, flags = 0;
 
         /*
@@ -1078,7 +1085,9 @@ i830_emit_bb_start(struct i915_request *rq,
                    u64 offset, u32 len,
                    unsigned int dispatch_flags)
 {
-        u32 *cs, cs_offset = intel_gt_scratch_offset(rq->engine->gt);
+        u32 *cs, cs_offset =
+                intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_DEFAULT);
 
         GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
 
@@ -1522,7 +1531,8 @@ static int flush_pd_dir(struct i915_request *rq)
         /* Stall until the page table load is complete */
         *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
-        *cs++ = intel_gt_scratch_offset(rq->engine->gt);
+        *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                        INTEL_GT_SCRATCH_FIELD_DEFAULT);
         *cs++ = MI_NOOP;
 
         intel_ring_advance(rq, cs);
@@ -1638,7 +1648,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                 /* Insert a delay before the next switch! */
                 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
                 *cs++ = i915_mmio_reg_offset(last_reg);
-                *cs++ = intel_gt_scratch_offset(rq->engine->gt);
+                *cs++ = intel_gt_scratch_offset(rq->engine->gt,
+                                                INTEL_GT_SCRATCH_FIELD_DEFAULT);
                 *cs++ = MI_NOOP;
         }
         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
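As the commit message says, more offsets are about to be added. A follow-up would only need to append an enumerator at a free byte offset in enum intel_gt_scratch_field and pass it to the helper; the field name and offset below are invented purely for illustration:

        /* Hypothetical future addition to enum intel_gt_scratch_field: */
        /*         INTEL_GT_SCRATCH_FIELD_NEW_WA = 512,                  */

        u32 addr = intel_gt_scratch_offset(engine->gt,
                                           INTEL_GT_SCRATCH_FIELD_NEW_WA);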