drm/i915: Remove access to global seqno in the HWSP
Stop accessing the HWSP to read the global seqno, and stop tracking the
mirror in the engine's execution timeline -- it is unused.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190226094922.31617-2-chris@chris-wilson.co.uk
commit 8892f47742
parent 89531e7d8e
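For orientation, a minimal sketch (not part of the patch) contrasting the engine-global seqno this change stops reading with the per-timeline seqno that the surviving traces use. intel_engine_get_seqno() and intel_read_status_page() appear verbatim in the removed hunks below; the completion check is an assumed illustration built around hwsp_seqno() and i915_seqno_passed(), and the helper name sketch_request_completed() is hypothetical, not code introduced by this commit.

/*
 * Sketch only: the helper removed by this patch reads one engine-wide
 * seqno from a fixed slot in the hardware status page (HWSP).
 */
static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

/*
 * Sketch only (assumed, not from this patch): what remains in use is the
 * per-request timeline seqno; hwsp_seqno(rq) reads the HWSP slot owned by
 * rq's own timeline, so completion is judged per request rather than via
 * a single engine-global counter.
 */
static inline bool sketch_request_completed(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}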
@@ -526,8 +526,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 			   ee->vm_info.pp_dir_base);
 		}
 	}
-	err_printf(m, " seqno: 0x%08x\n", ee->seqno);
-	err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
 	err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
 	err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
 	err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
@@ -1216,8 +1214,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 
 	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
 	ee->acthd = intel_engine_get_active_head(engine);
-	ee->seqno = intel_engine_get_seqno(engine);
-	ee->last_seqno = intel_engine_last_submit(engine);
 	ee->start = I915_READ_START(engine);
 	ee->head = I915_READ_HEAD(engine);
 	ee->tail = I915_READ_TAIL(engine);
@@ -94,8 +94,6 @@ struct i915_gpu_state {
 		u32 cpu_ring_head;
 		u32 cpu_ring_tail;
 
-		u32 last_seqno;
-
 		/* Register state */
 		u32 start;
 		u32 tail;
@@ -108,7 +106,6 @@ struct i915_gpu_state {
 		u32 bbstate;
 		u32 instpm;
 		u32 instps;
-		u32 seqno;
 		u64 bbaddr;
 		u64 acthd;
 		u32 fault_reg;
@@ -179,12 +179,11 @@ static void free_capture_list(struct i915_request *request)
 static void __retire_engine_request(struct intel_engine_cs *engine,
 				    struct i915_request *rq)
 {
-	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
 		  __func__, engine->name,
 		  rq->fence.context, rq->fence.seqno,
 		  rq->global_seqno,
-		  hwsp_seqno(rq),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(rq));
 
 	GEM_BUG_ON(!i915_request_completed(rq));
 
@@ -243,12 +242,11 @@ static void i915_request_retire(struct i915_request *request)
 {
 	struct i915_active_request *active, *next;
 
-	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
 		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(request->engine));
+		  hwsp_seqno(request));
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -305,12 +303,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct intel_ring *ring = rq->ring;
 	struct i915_request *tmp;
 
-	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
 		  rq->engine->name,
 		  rq->fence.context, rq->fence.seqno,
 		  rq->global_seqno,
-		  hwsp_seqno(rq),
-		  intel_engine_get_seqno(rq->engine));
+		  hwsp_seqno(rq));
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_request_completed(rq));
@@ -354,12 +351,11 @@ void __i915_request_submit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	u32 seqno;
 
-	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  engine->timeline.seqno + 1,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline.lock);
@@ -371,7 +367,6 @@ void __i915_request_submit(struct i915_request *request)
 
 	seqno = next_global_seqno(&engine->timeline);
 	GEM_BUG_ON(!seqno);
-	GEM_BUG_ON(intel_engine_signaled(engine, seqno));
 
 	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
@@ -409,12 +404,11 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
+	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
-		  hwsp_seqno(request),
-		  intel_engine_get_seqno(engine));
+		  hwsp_seqno(request));
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline.lock);
@@ -425,7 +419,6 @@ void __i915_request_unsubmit(struct i915_request *request)
 	 */
 	GEM_BUG_ON(!request->global_seqno);
 	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
-	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
 	engine->timeline.seqno--;
 
 	/* We may be recursing from the signal callback of another i915 fence */
@@ -788,7 +788,6 @@ static void nop_submit_request(struct i915_request *request)
 	spin_lock_irqsave(&engine->timeline.lock, flags);
 	__i915_request_submit(request);
 	i915_request_mark_complete(request);
-	intel_engine_write_global_seqno(engine, request->global_seqno);
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 
 	intel_engine_queue_breadcrumbs(engine);
@@ -455,12 +455,6 @@ cleanup:
 	return err;
 }
 
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
-	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
-	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
-}
-
 static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
 {
 	i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -1011,10 +1005,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 	if (i915_reset_failed(engine->i915))
 		return true;
 
-	/* Any inflight/incomplete requests? */
-	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
-		return false;
-
 	/* Waiting to drain ELSP? */
 	if (READ_ONCE(engine->execlists.active)) {
 		struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -1497,9 +1487,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	if (i915_reset_failed(engine->i915))
 		drm_printf(m, "*** WEDGED ***\n");
 
-	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x/%x [%d ms]\n",
-		   intel_engine_get_seqno(engine),
-		   intel_engine_last_submit(engine),
+	drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
 		   engine->hangcheck.last_seqno,
 		   engine->hangcheck.next_seqno,
 		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
@@ -528,13 +528,12 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
 
-			GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+			GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
 				  rq->fence.context, rq->fence.seqno,
 				  hwsp_seqno(rq),
-				  intel_engine_get_seqno(engine),
 				  rq_prio(rq));
 		} else {
 			GEM_BUG_ON(!n);
@@ -840,13 +839,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
-		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
+		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
 			  rq->engine->name,
 			  (unsigned int)(port - execlists->port),
 			  rq->global_seqno,
 			  rq->fence.context, rq->fence.seqno,
-			  hwsp_seqno(rq),
-			  intel_engine_get_seqno(rq->engine));
+			  hwsp_seqno(rq));
 
 		GEM_BUG_ON(!execlists->active);
 		execlists_context_schedule_out(rq,
@@ -902,8 +900,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	struct rb_node *rb;
 	unsigned long flags;
 
-	GEM_TRACE("%s current %d\n",
-		  engine->name, intel_engine_get_seqno(engine));
+	GEM_TRACE("%s\n", engine->name);
 
 	/*
 	 * Before we call engine->cancel_requests(), we should have exclusive
@@ -952,10 +949,6 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 		kmem_cache_free(engine->i915->priorities, p);
 	}
 
-	intel_write_status_page(engine,
-				I915_GEM_HWS_INDEX,
-				intel_engine_last_submit(engine));
-
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
@@ -1071,14 +1064,13 @@ static void process_csb(struct intel_engine_cs *engine)
 						  EXECLISTS_ACTIVE_USER));
 
 			rq = port_unpack(port, &count);
-			GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+			GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
 				  engine->name,
 				  port->context_id, count,
 				  rq ? rq->global_seqno : 0,
 				  rq ? rq->fence.context : 0,
 				  rq ? rq->fence.seqno : 0,
 				  rq ? hwsp_seqno(rq) : 0,
-				  intel_engine_get_seqno(engine),
 				  rq ? rq_prio(rq) : 0);
 
 			/* Check the context/desc id for this event matches */
@@ -1946,10 +1938,9 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	/* Following the reset, we need to reload the CSB read/write pointers */
 	reset_csb_pointers(&engine->execlists);
 
-	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+	GEM_TRACE("%s seqno=%d, stalled? %s\n",
 		  engine->name,
 		  rq ? rq->global_seqno : 0,
-		  intel_engine_get_seqno(engine),
 		  yesno(stalled));
 	if (!rq)
 		goto out_unlock;
@@ -782,10 +782,9 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
 		}
 	}
 
-	GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
+	GEM_TRACE("%s seqno=%d, stalled? %s\n",
 		  engine->name,
 		  rq ? rq->global_seqno : 0,
-		  intel_engine_get_seqno(engine),
 		  yesno(stalled));
 	/*
 	 * The guilty request will get skipped on a hung engine.
@@ -924,10 +923,6 @@ static void cancel_requests(struct intel_engine_cs *engine)
 		i915_request_mark_complete(request);
 	}
 
-	intel_write_status_page(engine,
-				I915_GEM_HWS_INDEX,
-				intel_engine_last_submit(engine));
-
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -848,8 +848,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
 	return (head - tail - CACHELINE_BYTES) & (size - 1);
 }
 
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-
 int intel_engine_setup_common(struct intel_engine_cs *engine);
 int intel_engine_init_common(struct intel_engine_cs *engine);
 void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -867,44 +865,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
 
-static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
-{
-	/*
-	 * We are only peeking at the tail of the submit queue (and not the
-	 * queue itself) in order to gain a hint as to the current active
-	 * state of the engine. Callers are not expected to be taking
-	 * engine->timeline->lock, nor are they expected to be concerned
-	 * wtih serialising this hint with anything, so document it as
-	 * a hint and nothing more.
-	 */
-	return READ_ONCE(engine->timeline.seqno);
-}
-
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
-	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
-					 u32 seqno)
-{
-	return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
-}
-
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
-					      u32 seqno)
-{
-	GEM_BUG_ON(!seqno);
-	return intel_engine_signaled(engine, seqno);
-}
-
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
-					    u32 seqno)
-{
-	GEM_BUG_ON(!seqno);
-	return intel_engine_signaled(engine, seqno - 1);
-}
-
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
@@ -226,8 +226,7 @@ static int igt_request_rewind(void *arg)
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	if (i915_request_wait(vip, 0, HZ) == -ETIME) {
-		pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
-		       vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+		pr_err("timed out waiting for high priority request\n");
 		goto err;
 	}
 
@@ -86,8 +86,6 @@ static struct mock_request *first_request(struct mock_engine *engine)
 static void advance(struct mock_request *request)
 {
 	list_del_init(&request->link);
-	intel_engine_write_global_seqno(request->base.engine,
-					request->base.global_seqno);
 	i915_request_mark_complete(&request->base);
 	GEM_BUG_ON(!i915_request_completed(&request->base));
 
@@ -278,7 +276,6 @@ void mock_engine_flush(struct intel_engine_cs *engine)
 
 void mock_engine_reset(struct intel_engine_cs *engine)
 {
-	intel_engine_write_global_seqno(engine, 0);
 }
 
 void mock_engine_free(struct intel_engine_cs *engine)