#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
#define I915_RING_FREE_SPACE 64

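/*
 * Illustrative sketch (not part of this header) of computing usable ring
 * space while keeping I915_RING_FREE_SPACE bytes between tail and head, so
 * the restriction quoted above can never be violated.  Field names follow
 * struct intel_ring_buffer below; the driver's real helper lives in
 * intel_ringbuffer.c and may differ in detail.
 *
 *	static int example_ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
 *
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */
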
struct intel_hw_status_page {
	u32		*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

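/*
 * Illustrative use of the accessors above (a sketch; the real ring
 * initialisation lives in intel_ringbuffer.c): each macro expands to an MMIO
 * access relative to the engine's mmio_base, e.g. when resetting the ring
 * registers:
 *
 *	I915_WRITE_HEAD(ring, 0);
 *	I915_WRITE_TAIL(ring, 0);
 *	if (I915_READ_HEAD(ring))
 *		DRM_DEBUG("%s head not reset to zero\n", ring->name);
 */
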
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};

#define HANGCHECK_SCORE_RING_HUNG 31

struct intel_ring_hangcheck {
	bool deadlock;
	u32 seqno;
	u32 acthd;
	int score;
	enum intel_ring_hangcheck_action action;
};

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
		VECS,
	} id;
#define I915_NUM_RINGS 4
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

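	/*
	 * Illustrative sketch (not driver code) of how last_retired_head is
	 * consumed when more ring space is needed: the head is advanced to
	 * the retired position, the marker is reset to -1, and ring->space
	 * is recomputed (see the free-space sketch near I915_RING_FREE_SPACE
	 * above):
	 *
	 *	if (ring->last_retired_head != -1) {
	 *		ring->head = ring->last_retired_head;
	 *		ring->last_retired_head = -1;
	 *		ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	 *		if (ring->space < 0)
	 *			ring->space += ring->size;
	 *	}
	 */
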
	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_ring_buffer *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_ring_buffer *ring,
				     u32 seqno);
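
	/*
	 * Illustrative call (a sketch, not driver code): hangcheck-style
	 * polling can tolerate a slightly stale value and pass
	 * lazy_coherency=true, while code that must observe the newest
	 * breadcrumb passes false to force the coherency kick described
	 * above:
	 *
	 *	u32 seqno = ring->get_seqno(ring, false);
	 */
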
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	/* our mbox written by others */
	u32		semaphore_register[I915_NUM_RINGS];
	/* mboxes this ring signals to */
	u32		signal_mbox[I915_NUM_RINGS];
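
	/*
	 * Illustrative call (a sketch; the in-tree caller is
	 * i915_gem_object_sync()): before the "waiter" ring consumes results
	 * produced on the "signaller" ring, it inserts a semaphore wait for
	 * the producing request's seqno:
	 *
	 *	ret = waiter->sync_to(waiter, signaller, seqno);
	 *	if (ret)
	 *		return ret;
	 */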

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives.  last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;
	bool gpu_caches_dirty;
	bool fbc_dirty;

	wait_queue_head_t irq_queue;

	/**
	 * Do an explicit TLB flush before MI_SET_CONTEXT
	 */
	bool itlb_before_ctx_switch;
	struct i915_hw_context *default_context;
	struct i915_hw_context *last_context;

	struct intel_ring_hangcheck hangcheck;

	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	/*
	 * Tables of commands the command parser needs to know about
	 * for this ring.
	 */
	const struct drm_i915_cmd_table *cmd_tables;
	int cmd_table_count;

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
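
/*
 * Illustrative sketch of how a caller might use get_cmd_length_mask() when a
 * command has no entry in cmd_tables (the real consumer is the command
 * parser in i915_cmd_parser.c; the "+ 2" length bias below is an assumption
 * for illustration only):
 *
 *	u32 mask = ring->get_cmd_length_mask(cmd_header);
 *	if (mask == 0)
 *		return -EINVAL;
 *	length = (cmd_header & mask) + 2;
 */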

static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
	return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * Index into sync_seqno[] for the ring we wait on, e.g.
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = rcs;
	 * bcs -> 0 = vecs, 1 = rcs, 2 = vcs;
	 * vecs -> 0 = rcs, 1 = vcs, 2 = bcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
			int reg, u32 value)
{
	ring->status_page.page_addr[reg] = value;
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x20
#define I915_GEM_HWS_SCRATCH_INDEX	0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

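/*
 * Illustrative use of the driver-owned area above (a sketch; the real code
 * is in intel_ringbuffer.c): the breadcrumb seqno emitted by add_request is
 * written to I915_GEM_HWS_INDEX and read back through
 * intel_read_status_page():
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */
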
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}
static inline void intel_ring_advance(struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
}
void __intel_ring_advance(struct intel_ring_buffer *ring);

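/*
 * Typical emit sequence (an illustrative sketch; real command streams are
 * built in intel_ringbuffer.c): intel_ring_begin() reserves space for n
 * dwords, intel_ring_emit() writes each dword, and intel_ring_advance()
 * wraps the software tail.  __intel_ring_advance() additionally writes the
 * new tail to the hardware when the commands are to be submitted.
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */
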
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);

int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */