2010-05-21 09:08:55 +08:00
# ifndef _INTEL_RINGBUFFER_H_
# define _INTEL_RINGBUFFER_H_
2014-05-10 14:10:43 -07:00
# include <linux/hashtable.h>
# define I915_CMD_HASH_ORDER 9
2012-12-03 18:43:32 +02:00
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
 *
 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
 * cacheline, the Head Pointer must not be greater than the Tail
 * Pointer."
 */
# define I915_RING_FREE_SPACE 64
2010-05-21 09:08:55 +08:00
/*
 * Hardware status page: a page the GPU writes status dwords into
 * (seqnos, head pointers, scratch) and the CPU reads back.
 */
struct intel_hw_status_page {
	u32		*page_addr;	/* CPU (kernel) mapping of the page */
	unsigned int	gfx_addr;	/* GPU (graphics) address of the page */
	struct		drm_i915_gem_object *obj;	/* backing GEM object */
};
2011-04-25 11:22:22 -07:00
# define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
# define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
2010-11-09 17:17:32 +08:00
2011-04-25 11:22:22 -07:00
# define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
# define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
2010-11-09 17:17:32 +08:00
2011-04-25 11:22:22 -07:00
# define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
# define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
2010-11-09 17:17:32 +08:00
2011-04-25 11:22:22 -07:00
# define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
# define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
2010-11-09 17:17:32 +08:00
2011-04-25 11:22:22 -07:00
# define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
# define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
2010-08-02 16:29:44 +02:00
2014-03-12 16:39:41 +05:30
# define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
2014-04-02 16:36:07 +01:00
# define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
2014-03-12 16:39:41 +05:30
2014-06-30 09:53:37 -07:00
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes , and that must have qw aligned offsets , simply pretend it ' s 8 b .
*/
# define i915_semaphore_seqno_size sizeof(uint64_t)
# define GEN8_SIGNAL_OFFSET(__ring, to) \
( i915_gem_obj_ggtt_offset ( dev_priv - > semaphore_obj ) + \
( ( __ring ) - > id * I915_NUM_RINGS * i915_semaphore_seqno_size ) + \
( i915_semaphore_seqno_size * ( to ) ) )
# define GEN8_WAIT_OFFSET(__ring, from) \
( i915_gem_obj_ggtt_offset ( dev_priv - > semaphore_obj ) + \
( ( from ) * I915_NUM_RINGS * i915_semaphore_seqno_size ) + \
( i915_semaphore_seqno_size * ( __ring ) - > id ) )
# define GEN8_RING_SEMAPHORE_INIT do { \
if ( ! dev_priv - > semaphore_obj ) { \
break ; \
} \
ring - > semaphore . signal_ggtt [ RCS ] = GEN8_SIGNAL_OFFSET ( ring , RCS ) ; \
ring - > semaphore . signal_ggtt [ VCS ] = GEN8_SIGNAL_OFFSET ( ring , VCS ) ; \
ring - > semaphore . signal_ggtt [ BCS ] = GEN8_SIGNAL_OFFSET ( ring , BCS ) ; \
ring - > semaphore . signal_ggtt [ VECS ] = GEN8_SIGNAL_OFFSET ( ring , VECS ) ; \
ring - > semaphore . signal_ggtt [ VCS2 ] = GEN8_SIGNAL_OFFSET ( ring , VCS2 ) ; \
ring - > semaphore . signal_ggtt [ ring - > id ] = MI_SEMAPHORE_SYNC_INVALID ; \
} while ( 0 )
2013-08-11 12:44:01 +03:00
/* Verdicts the hangcheck logic can reach about a ring, roughly ordered
 * from harmless to hung. */
enum intel_ring_hangcheck_action {
	HANGCHECK_IDLE = 0,	/* ring has no work pending */
	HANGCHECK_WAIT,		/* ring is waiting (e.g. on a semaphore) */
	HANGCHECK_ACTIVE,	/* ring is making forward progress */
	HANGCHECK_KICK,		/* ring looks stuck but may respond to a kick */
	HANGCHECK_HUNG,		/* ring is hung; reset required */
};
2013-06-12 12:35:32 +03:00
2014-01-30 19:04:43 +02:00
# define HANGCHECK_SCORE_RING_HUNG 31
2013-05-24 17:16:07 +03:00
/* Per-ring state tracked by the periodic hangcheck: the last-sampled
 * execution position and a score that accumulates towards a hang verdict. */
struct intel_ring_hangcheck {
	u64 acthd;	/* last sampled ACTHD (active head) register value */
	u32 seqno;	/* last sampled completed seqno */
	int score;	/* accumulated suspicion; compared against
			 * HANGCHECK_SCORE_RING_HUNG */
	enum intel_ring_hangcheck_action action;	/* latest verdict */
	int deadlock;	/* consecutive samples stuck on the same semaphore */
};
2014-05-22 14:13:34 +01:00
/* The ring buffer itself: a GEM object mapped for CPU writes, into which
 * commands are emitted at @tail and from which the GPU consumes at @head. */
struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;	/* backing storage */
	void __iomem *virtual_start;		/* CPU write mapping of @obj */

	u32 head;		/* byte offset the GPU has consumed up to */
	u32 tail;		/* byte offset of the next CPU write */
	int space;		/* bytes free between tail and head */
	int size;		/* total size in bytes (power of two) */
	int effective_size;	/* usable size (excludes wrap padding) */

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32 last_retired_head;
};
2014-05-22 14:13:33 +01:00
/*
 * One command-streamer engine (render, video, blitter, ...): identity,
 * its ring buffer, the vfuncs the per-generation backends install, and
 * the bookkeeping for requests, semaphores, hangcheck and the command
 * parser.
 */
struct intel_engine_cs {
	const char	*name;	/* human-readable name for debug/error dumps */
	enum intel_ring_id {
		RCS = 0x0,	/* render */
		VCS,		/* video (BSD) */
		BCS,		/* blitter */
		VECS,		/* video enhancement */
		VCS2		/* second video ring */
	} id;
#define I915_NUM_RINGS 5
#define LAST_USER_RING (VECS + 1)
	u32		mmio_base;	/* base of this ring's register block */
	struct drm_device *dev;
	struct intel_ringbuffer *buffer;	/* the ring itself; NULL until init */

	struct intel_hw_status_page status_page;

	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;	/* seqno armed by i915_trace_irq_get() */
	/* Enable/disable user interrupts for this ring; refcounted. */
	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
	void		(*irq_put)(struct intel_engine_cs *ring);

	/* One-time hardware init for this ring. */
	int		(*init)(struct intel_engine_cs *ring);

	/* Move the hardware tail pointer, kicking off execution. */
	void		(*write_tail)(struct intel_engine_cs *ring,
				      u32 value);
	/* Emit a flush of the given GPU domains into the ring. */
	int __must_check (*flush)(struct intel_engine_cs *ring,
				  u32	invalidate_domains,
				  u32	flush_domains);
	/* Emit the breadcrumb (seqno write + interrupt) for a request. */
	int		(*add_request)(struct intel_engine_cs *ring);
	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	u32		(*get_seqno)(struct intel_engine_cs *ring,
				     bool lazy_coherency);
	void		(*set_seqno)(struct intel_engine_cs *ring,
				     u32 seqno);
	/* Emit a batchbuffer-start pointing at the user's batch. */
	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
					       u64 offset, u32 length,
					       unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
	void		(*cleanup)(struct intel_engine_cs *ring);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to    signal to	  signal to	signal to
	 *	    RCS		   VCS		  BCS	     VECS	  VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP  (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from    sync from	  sync from	sync from
	 *	    RCS		   VCS		  BCS	     VECS	  VCS2
	 *      -------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP  (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP  (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
		/* Last seqno we synced on, per other ring. */
		u32	sync_seqno[I915_NUM_RINGS-1];

		union {
			/* pre-gen8: per-ring mailbox registers */
			struct {
				/* our mbox written by others */
				u32		wait[I915_NUM_RINGS];
				/* mboxes this ring signals to */
				u32		signal[I915_NUM_RINGS];
			} mbox;
			/* gen8+: GGTT offsets per GEN8_RING_SEMAPHORE_INIT */
			u64		signal_ggtt[I915_NUM_RINGS];
		};

		/* AKA wait() */
		int	(*sync_to)(struct intel_engine_cs *ring,
				   struct intel_engine_cs *to,
				   u32 seqno);
		int	(*signal)(struct intel_engine_cs *signaller,
				  /* num_dwords needed by caller */
				  unsigned int num_dwords);
	} semaphore;

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	struct drm_i915_gem_request *preallocated_lazy_request;
	u32 outstanding_lazy_seqno;	/* 0 == no request under construction */
	bool gpu_caches_dirty;	/* flush needed before the next request */
	bool fbc_dirty;

	wait_queue_head_t irq_queue;	/* woken on user interrupts */

	struct intel_context *default_context;
	struct intel_context *last_context;	/* HW context last run on this ring */

	struct intel_ring_hangcheck hangcheck;

	/* Per-ring scratch page (e.g. for pipe-control writes). */
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;

	bool needs_cmd_parser;	/* batches on this ring must be parsed */

	/*
	 * Table of commands the command parser needs to know about
	 * for this ring.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const u32 *reg_table;
	int reg_count;

	/*
	 * Table of registers allowed in commands that read/write registers, but
	 * only from the DRM master.
	 */
	const u32 *master_reg_table;
	int master_reg_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the ring's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-ring length field
	 * encoding for the command (i.e. certain opcode ranges use certain bits
	 * to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);
};
2012-05-11 14:29:30 +01:00
static inline bool
2014-05-22 14:13:33 +01:00
intel_ring_initialized ( struct intel_engine_cs * ring )
2012-05-11 14:29:30 +01:00
{
2014-05-22 14:13:35 +01:00
return ring - > buffer & & ring - > buffer - > obj ;
2012-05-11 14:29:30 +01:00
}
2011-12-14 13:57:00 +01:00
static inline unsigned
2014-05-22 14:13:33 +01:00
intel_ring_flag ( struct intel_engine_cs * ring )
2011-12-14 13:57:00 +01:00
{
return 1 < < ring - > id ;
}
2010-12-04 11:30:53 +00:00
/*
 * Map @other to its slot (0..I915_NUM_RINGS-2) in @ring's per-ring
 * semaphore bookkeeping: the rings that follow @ring in id order come
 * first, wrapping around, and a ring has no slot for itself.
 */
static inline u32
intel_ring_sync_index(struct intel_engine_cs *ring,
		      struct intel_engine_cs *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs, 2 = vecs, 3 = vcs2;
	 * vcs -> 0 = bcs, 1 = vecs, 2 = vcs2, 3 = rcs;
	 * bcs -> 0 = vecs, 1 = vcs2, 2 = rcs, 3 = vcs;
	 * vecs -> 0 = vcs2, 1 = rcs, 2 = vcs, 3 = bcs;
	 * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs;
	 */

	/* NOTE(review): the pointer subtraction assumes @ring and @other
	 * are elements of one contiguous array of engines ordered by ring
	 * id -- confirm against where the engines are allocated. */
	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
2010-05-21 09:08:55 +08:00
/* Read dword @reg from the CPU mapping of the ring's hardware status
 * page. The GPU updates the page behind the compiler's back, so force a
 * fresh load each call. */
static inline u32
intel_read_status_page(struct intel_engine_cs *ring,
		       int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	barrier();
	return ring->status_page.page_addr[reg];
}
2012-12-19 11:13:05 +02:00
static inline void
2014-05-22 14:13:33 +01:00
intel_write_status_page ( struct intel_engine_cs * ring ,
2012-12-19 11:13:05 +02:00
int reg , u32 value )
{
ring - > status_page . page_addr [ reg ] = value ;
}
2011-01-13 19:06:50 +00:00
/**
* Reads a dword out of the status page , which is written to from the command
* queue by automatic updates , MI_REPORT_HEAD , MI_STORE_DATA_INDEX , or
* MI_STORE_DATA_IMM .
*
* The following dwords have a reserved meaning :
* 0x00 : ISR copy , updated when an ISR bit not set in the HWSTAM changes .
* 0x04 : ring 0 head pointer
* 0x05 : ring 1 head pointer ( 915 - class )
* 0x06 : ring 2 head pointer ( 915 - class )
* 0x10 - 0x1b : Context status DWords ( GM45 )
* 0x1f : Last written status offset . ( GM45 )
*
* The area from dword 0x20 to 0x3ff is available for driver usage .
*/
# define I915_GEM_HWS_INDEX 0x20
2012-10-26 09:42:42 -07:00
# define I915_GEM_HWS_SCRATCH_INDEX 0x30
# define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
2011-01-13 19:06:50 +00:00
2014-05-22 14:13:33 +01:00
void intel_stop_ring_buffer ( struct intel_engine_cs * ring ) ;
void intel_cleanup_ring_buffer ( struct intel_engine_cs * ring ) ;
2011-03-19 18:14:27 -07:00
2014-05-22 14:13:33 +01:00
int __must_check intel_ring_begin ( struct intel_engine_cs * ring , int n ) ;
int __must_check intel_ring_cacheline_align ( struct intel_engine_cs * ring ) ;
static inline void intel_ring_emit ( struct intel_engine_cs * ring ,
2010-10-27 12:18:21 +01:00
u32 data )
2010-08-04 15:18:14 +01:00
{
2014-05-22 14:13:36 +01:00
struct intel_ringbuffer * ringbuf = ring - > buffer ;
iowrite32 ( data , ringbuf - > virtual_start + ringbuf - > tail ) ;
ringbuf - > tail + = 4 ;
2010-08-04 15:18:14 +01:00
}
2014-05-22 14:13:33 +01:00
static inline void intel_ring_advance ( struct intel_engine_cs * ring )
2013-08-10 22:16:32 +01:00
{
2014-05-22 14:13:36 +01:00
struct intel_ringbuffer * ringbuf = ring - > buffer ;
ringbuf - > tail & = ringbuf - > size - 1 ;
2013-08-10 22:16:32 +01:00
}
2014-05-22 14:13:33 +01:00
void __intel_ring_advance ( struct intel_engine_cs * ring ) ;
2013-08-10 22:16:32 +01:00
2014-05-22 14:13:33 +01:00
int __must_check intel_ring_idle ( struct intel_engine_cs * ring ) ;
void intel_ring_init_seqno ( struct intel_engine_cs * ring , u32 seqno ) ;
int intel_ring_flush_all_caches ( struct intel_engine_cs * ring ) ;
int intel_ring_invalidate_all_caches ( struct intel_engine_cs * ring ) ;
2010-05-21 09:08:55 +08:00
2010-09-16 10:43:11 +08:00
int intel_init_render_ring_buffer ( struct drm_device * dev ) ;
int intel_init_bsd_ring_buffer ( struct drm_device * dev ) ;
2014-04-17 10:37:37 +08:00
int intel_init_bsd2_ring_buffer ( struct drm_device * dev ) ;
2010-10-19 11:19:32 +01:00
int intel_init_blt_ring_buffer ( struct drm_device * dev ) ;
2013-05-28 19:22:23 -07:00
int intel_init_vebox_ring_buffer ( struct drm_device * dev ) ;
2010-05-21 09:08:55 +08:00
2014-05-22 14:13:33 +01:00
u64 intel_ring_get_active_head ( struct intel_engine_cs * ring ) ;
void intel_ring_setup_status_page ( struct intel_engine_cs * ring ) ;
2010-09-24 21:20:10 +02:00
2014-07-03 16:28:04 +01:00
/* Current software tail offset, in bytes, into the ring buffer. */
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
	return ringbuf->tail;
}
2014-05-22 14:13:33 +01:00
/* Seqno reserved for the request currently under construction. Zero
 * means no request is outstanding, which is a caller bug (hence the
 * BUG_ON). */
static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
{
	BUG_ON(ring->outstanding_lazy_seqno == 0);
	return ring->outstanding_lazy_seqno;
}
2014-05-22 14:13:33 +01:00
/* Arm a tracing interrupt for @seqno. Only takes an irq reference the
 * first time: the short-circuit on trace_irq_seqno != 0 means irq_get()
 * is not called again while a seqno is already recorded. */
static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}
2011-01-20 09:57:11 +00:00
/* DRI warts */
int intel_render_ring_init_dri ( struct drm_device * dev , u64 start , u32 size ) ;
2010-05-21 09:08:55 +08:00
# endif /* _INTEL_RINGBUFFER_H_ */