#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

enum {
	RCS = 0x0,
	VCS,
	BCS,
	I915_NUM_RINGS,
};

struct intel_hw_status_page {
	u32 __iomem	*page_addr;
	unsigned int	gfx_addr;
	struct		drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	spinlock_t	irq_lock;
	u32		irq_refcount;
	u32		irq_mask;
	u32		irq_seqno;		/* last seqno seen at irq time */
	u32		trace_irq_seqno;
	u32		waiting_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3];	/* our mbox written by others */
	u32		signal_mbox[2];		/* mboxes this ring signals to */

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or the flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;

	void *private;
};

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs,
	 * bcs -> 0 = cs, 1 = vcs.
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
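
/*
 * Example (sketch): assuming the rings live in one contiguous array,
 * e.g. dev_priv->ring[I915_NUM_RINGS], the pointer arithmetic above
 * maps each peer ring to a slot in sync_seqno[], skipping ourselves:
 *
 *	intel_ring_sync_index(&ring[RCS], &ring[VCS]) == 0
 *	intel_ring_sync_index(&ring[RCS], &ring[BCS]) == 1
 *	intel_ring_sync_index(&ring[VCS], &ring[RCS]) == 1
 */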

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define READ_HWSP(dev_priv, reg) intel_read_status_page(LP_RING(dev_priv), reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX		0x20
#define I915_BREADCRUMB_INDEX		0x21
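
/*
 * Example (sketch): the legacy wait path polls the breadcrumb dword in
 * the status page rather than an MMIO register, along the lines of
 *
 *	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
 *		return 0;
 *
 * where irq_nr is a breadcrumb value previously emitted to the ring.
 */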

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);

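/* Waiting for size - 8 bytes of free space is equivalent to waiting for
 * the ring to drain completely: the driver keeps 8 bytes in reserve so
 * that an empty ring (head == tail) is distinguishable from a full one.
 */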
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
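
/*
 * Typical emission sequence (illustrative sketch; MI_NOOP stands in for
 * real commands):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * intel_ring_begin() must reserve at least as many dwords as are
 * emitted before intel_ring_advance() commits the new tail to the
 * hardware.
 */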

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

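/* Take an irq reference on behalf of the tracepoints: the first traced
 * seqno grabs the reference; the retire path is expected to drop it
 * again once that seqno has passed.
 */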
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */