drm/i915: Move intel_engine_context_in/out into intel_lrc.c
intel_lrc.c is the only caller, so to avoid some header file ordering issues in future patches move these two functions over there. Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Link: https://patchwork.freedesktop.org/patch/msgid/20191025090952.10135-1-tvrtko.ursulin@linux.intel.com
This commit is contained in:
@ -290,61 +290,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
|
|||||||
struct drm_printer *m,
|
struct drm_printer *m,
|
||||||
const char *header, ...);
|
const char *header, ...);
|
||||||
|
|
||||||
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
|
|
||||||
{
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (READ_ONCE(engine->stats.enabled) == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
write_seqlock_irqsave(&engine->stats.lock, flags);
|
|
||||||
|
|
||||||
if (engine->stats.enabled > 0) {
|
|
||||||
if (engine->stats.active++ == 0)
|
|
||||||
engine->stats.start = ktime_get();
|
|
||||||
GEM_BUG_ON(engine->stats.active == 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
write_sequnlock_irqrestore(&engine->stats.lock, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void intel_engine_context_out(struct intel_engine_cs *engine)
|
|
||||||
{
|
|
||||||
unsigned long flags;
|
|
||||||
|
|
||||||
if (READ_ONCE(engine->stats.enabled) == 0)
|
|
||||||
return;
|
|
||||||
|
|
||||||
write_seqlock_irqsave(&engine->stats.lock, flags);
|
|
||||||
|
|
||||||
if (engine->stats.enabled > 0) {
|
|
||||||
ktime_t last;
|
|
||||||
|
|
||||||
if (engine->stats.active && --engine->stats.active == 0) {
|
|
||||||
/*
|
|
||||||
* Decrement the active context count and in case GPU
|
|
||||||
* is now idle add up to the running total.
|
|
||||||
*/
|
|
||||||
last = ktime_sub(ktime_get(), engine->stats.start);
|
|
||||||
|
|
||||||
engine->stats.total = ktime_add(engine->stats.total,
|
|
||||||
last);
|
|
||||||
} else if (engine->stats.active == 0) {
|
|
||||||
/*
|
|
||||||
* After turning on engine stats, context out might be
|
|
||||||
* the first event in which case we account from the
|
|
||||||
* time stats gathering was turned on.
|
|
||||||
*/
|
|
||||||
last = ktime_sub(ktime_get(), engine->stats.enabled_at);
|
|
||||||
|
|
||||||
engine->stats.total = ktime_add(engine->stats.total,
|
|
||||||
last);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
write_sequnlock_irqrestore(&engine->stats.lock, flags);
|
|
||||||
}
|
|
||||||
|
|
||||||
int intel_enable_engine_stats(struct intel_engine_cs *engine);
|
int intel_enable_engine_stats(struct intel_engine_cs *engine);
|
||||||
void intel_disable_engine_stats(struct intel_engine_cs *engine);
|
void intel_disable_engine_stats(struct intel_engine_cs *engine);
|
||||||
|
|
||||||
|
@ -944,6 +944,61 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
|
|||||||
status, rq);
|
status, rq);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void intel_engine_context_in(struct intel_engine_cs *engine)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
if (READ_ONCE(engine->stats.enabled) == 0)
|
||||||
|
return;
|
||||||
|
|
||||||
|
write_seqlock_irqsave(&engine->stats.lock, flags);
|
||||||
|
|
||||||
|
if (engine->stats.enabled > 0) {
|
||||||
|
if (engine->stats.active++ == 0)
|
||||||
|
engine->stats.start = ktime_get();
|
||||||
|
GEM_BUG_ON(engine->stats.active == 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
write_sequnlock_irqrestore(&engine->stats.lock, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void intel_engine_context_out(struct intel_engine_cs *engine)
|
||||||
|
{
|
||||||
|
unsigned long flags;
|
||||||
|
|
||||||
|
if (READ_ONCE(engine->stats.enabled) == 0)
|
||||||
|
return;
|
||||||
|
|
||||||
|
write_seqlock_irqsave(&engine->stats.lock, flags);
|
||||||
|
|
||||||
|
if (engine->stats.enabled > 0) {
|
||||||
|
ktime_t last;
|
||||||
|
|
||||||
|
if (engine->stats.active && --engine->stats.active == 0) {
|
||||||
|
/*
|
||||||
|
* Decrement the active context count and in case GPU
|
||||||
|
* is now idle add up to the running total.
|
||||||
|
*/
|
||||||
|
last = ktime_sub(ktime_get(), engine->stats.start);
|
||||||
|
|
||||||
|
engine->stats.total = ktime_add(engine->stats.total,
|
||||||
|
last);
|
||||||
|
} else if (engine->stats.active == 0) {
|
||||||
|
/*
|
||||||
|
* After turning on engine stats, context out might be
|
||||||
|
* the first event in which case we account from the
|
||||||
|
* time stats gathering was turned on.
|
||||||
|
*/
|
||||||
|
last = ktime_sub(ktime_get(), engine->stats.enabled_at);
|
||||||
|
|
||||||
|
engine->stats.total = ktime_add(engine->stats.total,
|
||||||
|
last);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
write_sequnlock_irqrestore(&engine->stats.lock, flags);
|
||||||
|
}
|
||||||
|
|
||||||
static inline struct intel_engine_cs *
|
static inline struct intel_engine_cs *
|
||||||
__execlists_schedule_in(struct i915_request *rq)
|
__execlists_schedule_in(struct i915_request *rq)
|
||||||
{
|
{
|
||||||
|
Reference in New Issue
Block a user