linux/drivers/gpu/drm/i915/gt/intel_ring.h


/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_RING_H
#define INTEL_RING_H

#include "i915_gem.h" /* GEM_BUG_ON */
#include "i915_request.h"
#include "intel_ring_types.h"

struct intel_engine_cs;

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine, int size);

u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
int intel_ring_cacheline_align(struct i915_request *rq);

unsigned int intel_ring_update_space(struct intel_ring *ring);

int intel_ring_pin(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);

void intel_ring_free(struct kref *ref);

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
kref_get(&ring->ref);
return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
kref_put(&ring->ref, intel_ring_free);
}

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
/* Dummy function.
*
* This serves as a placeholder in the code so that the reader
* can compare against the preceding intel_ring_begin() and
* check that the number of dwords emitted matches the space
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
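
/*
 * A minimal usage sketch, not part of the i915 API: every dword written
 * between intel_ring_begin() and intel_ring_advance() counts against the
 * space reserved, so the assertion above fires if the two disagree. The
 * 4-dword reservation and the MI_NOOP opcode (from intel_gpu_commands.h,
 * which this header does not include) are assumptions chosen purely for
 * illustration.
 */
static inline int intel_ring_example_emit_noops(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4); /* reserve space for 4 dwords */
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs); /* asserts exactly 4 dwords were emitted */
	return 0;
}
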
static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
return pos & (ring->size - 1);
}
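
/*
 * Worked example for intel_ring_wrap() above, assuming a 4 KiB ring:
 * size = 4096 gives the mask 0xfff, so a position of 4100 wraps to
 * 4100 & 0xfff == 4. The masking is only correct because ring sizes
 * are always powers of two.
 */
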
static inline int intel_ring_direction(const struct intel_ring *ring,
u32 next, u32 prev)
{
typecheck(typeof(ring->size), next);
typecheck(typeof(ring->size), prev);
return (next - prev) << ring->wrap;
}
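
/*
 * Worked example for intel_ring_direction() above, assuming ring->wrap
 * is initialised at ring creation to BITS_PER_TYPE(ring->size) -
 * ilog2(size), i.e. 32 - 12 = 20 for a 4 KiB ring. With prev = 4092 and
 * next = 0 (next has just wrapped), next - prev == 0xfffff004, and
 * shifting left by 20 discards the wrap bits, leaving 4 << 20, a
 * positive value: next lies ahead of prev.
 */
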
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
unsigned int pos)
{
if (pos & -ring->size) /* must be strictly within the ring */
return false;
if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
return false;
return true;
}
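
/*
 * A note on the first check above: for a power-of-two size, -ring->size
 * is a mask of every bit at or above ilog2(size), so pos & -ring->size
 * is nonzero exactly when pos >= ring->size. E.g. with size = 4096,
 * -4096 == 0xfffff000, so pos = 4096 is rejected while pos = 4088 is
 * within the ring (and qword aligned).
 */
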
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - rq->ring->vaddr;
GEM_BUG_ON(offset > rq->ring->size);
return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
unsigned int head = READ_ONCE(ring->head);
GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
/*
* "Ring Buffer Use"
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6
* Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
* Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the
* same cacheline, the Head Pointer must not be greater than the Tail
* Pointer."
*
* We use ring->head as the last known location of the actual RING_HEAD,
* it may have advanced but in the worst case it is equally the same
* as ring->head and so we should never program RING_TAIL to advance
* into the same cacheline as ring->head.
*/
#define cacheline(a) round_down(a, CACHELINE_BYTES)
GEM_BUG_ON(cacheline(tail) == cacheline(head) && tail < head);
#undef cacheline
}
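
/*
 * Worked example of the restriction above, assuming CACHELINE_BYTES is
 * 64: head = 68 and tail = 80 round down to the same cacheline (64),
 * but tail >= head, so the pair is allowed. head = 68 with tail = 66
 * shares cacheline 64 with tail < head, and the assertion fires.
 */
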
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
/* Whilst writes to the tail are strictly ordered, there is no
* serialisation between readers and the writers. The tail may be
* read by i915_request_retire() just as it is being updated
* by execlists, as although the breadcrumb is complete, the context
* switch hasn't been seen.
*/
assert_ring_tail_valid(ring, tail);
ring->tail = tail;
return tail;
}

static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
/*
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the
* same cacheline, the Head Pointer must not be greater than the Tail
* Pointer."
*/
GEM_BUG_ON(!is_power_of_2(size));
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
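
/*
 * Worked example for __intel_ring_space() above, again assuming
 * CACHELINE_BYTES is 64: with head = 128, tail = 1024 and size = 4096,
 * (128 - 1024 - 64) & 4095 == 3136 bytes remain. The extra cacheline
 * subtracted keeps a newly programmed tail out of the cacheline
 * occupied by the head, per the restriction quoted above.
 */
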
#endif /* INTEL_RING_H */