/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

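/*
 * Hand out the next seqno for this device.  Zero is reserved as the
 * "no seqno" marker, so the counter skips it when it wraps.
 */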
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}

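/*
 * Translate GEM invalidate/flush domain bits into a single MI_FLUSH on the
 * render ring; nothing is emitted unless a GPU domain is involved.
 */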
static void
render_ring_flush(struct drm_device *dev,
		  struct intel_ring_buffer *ring,
		  u32 invalidate_domains,
		  u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains | flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (INTEL_INFO(dev)->gen < 4) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif

		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}

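/*
 * Default tail-pointer update: a single MMIO write.  Rings with stricter
 * ordering requirements (the Gen6 BSD ring below) override this hook.
 */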
static void ring_write_tail(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

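/*
 * Bring a ring up from scratch: stop it, program the start address, verify
 * that HEAD really reset to zero (G45 is known to miss this), and only then
 * enable it with a RING_VALID control write.
 */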
static int init_ring_common(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(dev, ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj_priv->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
		       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
		       | RING_REPORT_64K | RING_VALID);

	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}

static int init_render_ring(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);
	int mode;

	if (INTEL_INFO(dev)->gen > 3) {
		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
		if (IS_GEN6(dev))
			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
		I915_WRITE(MI_MODE, mode);
	}

	return ret;
}

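/*
 * Emit a depth-stall PIPE_CONTROL that writes a qword to 'addr'; used below
 * to scrub the PIPE_NOTIFY workaround cachelines before raising an interrupt.
 */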
#define PIPE_CONTROL_FLUSH(addr)				\
do {								\
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL | 2);			\
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);		\
	OUT_RING(0);						\
	OUT_RING(0);						\
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
			struct intel_ring_buffer *ring,
			u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return seqno;
}

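/*
 * The most recently completed seqno lives in the pipe-control scratch page
 * when PIPE_CONTROL is in use, otherwise in the hardware status page.
 */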
static u32
render_ring_get_seqno(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void intel_ring_setup_status_page(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
			   ring->status_page.gfx_addr);
		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
	} else {
		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
			   ring->status_page.gfx_addr);
		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
	}
}

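/*
 * The BSD ring ignores the domain arguments and always emits a plain
 * MI_FLUSH.
 */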
static void
bsd_ring_flush(struct drm_device *dev,
	       struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static int init_bsd_ring(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
ring_add_request(struct drm_device *dev,
		 struct intel_ring_buffer *ring,
		 u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
ring_status_page_get_seqno(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

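/*
 * Batch dispatch for the non-render rings: a two-dword, 965-style
 * MI_BATCH_BUFFER_START.  Cliprects are not used on these rings.
 */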
static int
ring_dispatch_gem_execbuffer(struct drm_device *dev,
			     struct intel_ring_buffer *ring,
			     struct drm_i915_gem_execbuffer2 *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t)exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);

	return 0;
}

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_i915_gem_execbuffer2 *exec,
				    struct drm_clip_rect *cliprects,
				    uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t)exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t)exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
						exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 2);
			if (INTEL_INFO(dev)->gen >= 4) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}

	/* XXX breadcrumb */

	return 0;
}

static void cleanup_status_page(struct drm_device *dev,
				struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

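/*
 * The per-ring hardware status page is a pinned, kmap()ed GEM object that
 * the GPU writes seqnos and other status dwords into.
 */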
static int init_status_page(struct drm_device *dev,
			    struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0) {
		goto err_unref;
	}

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}

int intel_init_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
			       struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(dev, ring);
}

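/*
 * Pad the remainder of the ring with MI_NOOPs (waiting for space if needed)
 * and wrap the tail back to the start of the buffer.
 */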
static int intel_wrap_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;
	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct drm_device *dev,
			   struct intel_ring_buffer *ring, int n)
{
	unsigned long end;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 head;

	head = intel_read_status_page(ring, 4);
	if (head) {
		ring->head = head & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n)
			return 0;
	}

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}

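/*
 * Reserve space for num_dwords commands, wrapping and/or waiting as needed.
 * Every reservation is expected to be paired with intel_ring_advance().
 */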
void intel_ring_begin(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      int num_dwords)
{
	int n = 4 * num_dwords;

	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->write_tail(dev, ring, ring->tail);
}

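/*
 * Typical emission pattern (a minimal sketch; bsd_ring_flush() above is a
 * real instance of it):
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_FLUSH);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 */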
static const struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.id = RING_RENDER,
	.mmio_base = RENDER_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_render_ring,
	.write_tail = ring_write_tail,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_seqno = render_ring_get_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};

/* ring buffer for bit-stream decoder */
static const struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.id = RING_BSD,
	.mmio_base = BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_bsd_ring,
	.write_tail = ring_write_tail,
	.flush = bsd_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_status_page_get_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
};

static void gen6_bsd_ring_write_tail(struct drm_device *dev,
				     struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static void gen6_ring_flush(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    u32 invalidate_domains,
			    u32 flush_domains)
{
	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_FLUSH_DW);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_advance(dev, ring);
}

static int
gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring,
				  struct drm_i915_gem_execbuffer2 *exec,
				  struct drm_clip_rect *cliprects,
				  uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t)exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring,
			MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);

	return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
	.name = "gen6 bsd ring",
	.id = RING_BSD,
	.mmio_base = GEN6_BSD_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = init_bsd_ring,
	.write_tail = gen6_bsd_ring_write_tail,
	.flush = gen6_ring_flush,
	.add_request = ring_add_request,
	.get_seqno = ring_status_page_get_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
};

/* Blitter support (SandyBridge+) */

static void
blt_ring_get_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
blt_ring_put_user_irq(struct drm_device *dev,
		      struct intel_ring_buffer *ring)
{
	/* do nothing */
}

/*
 * Workaround for some steppings of SNB: each time the BLT engine's ring
 * tail is moved, the first command the ring parses must be
 * MI_BATCH_BUFFER_START.
 */
#define NEED_BLT_WORKAROUND(dev) \
	(IS_GEN6(dev) && (dev->pdev->revision < 8))

static inline struct drm_i915_gem_object *
to_blt_workaround(struct intel_ring_buffer *ring)
{
	return ring->private;
}

static int blt_ring_init(struct drm_device *dev,
			 struct intel_ring_buffer *ring)
{
	if (NEED_BLT_WORKAROUND(dev)) {
		struct drm_i915_gem_object *obj;
		u32 __iomem *ptr;
		int ret;

		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
		if (obj == NULL)
			return -ENOMEM;

		ret = i915_gem_object_pin(&obj->base, 4096);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ptr = kmap(obj->pages[0]);
		iowrite32(MI_BATCH_BUFFER_END, ptr);
		iowrite32(MI_NOOP, ptr + 1);
		kunmap(obj->pages[0]);

		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
		if (ret) {
			i915_gem_object_unpin(&obj->base);
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		ring->private = obj;
	}

	return init_ring_common(dev, ring);
}

static void blt_ring_begin(struct drm_device *dev,
			   struct intel_ring_buffer *ring,
			   int num_dwords)
{
	if (ring->private) {
		intel_ring_begin(dev, ring, num_dwords + 2);
		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
	} else
		intel_ring_begin(dev, ring, 4);
}

static void blt_ring_flush(struct drm_device *dev,
			   struct intel_ring_buffer *ring,
			   u32 invalidate_domains,
			   u32 flush_domains)
{
	blt_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_FLUSH_DW);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_emit(dev, ring, 0);
	intel_ring_advance(dev, ring);
}

static u32
blt_ring_add_request(struct drm_device *dev,
		     struct intel_ring_buffer *ring,
		     u32 flush_domains)
{
	u32 seqno = i915_gem_get_seqno(dev);

	blt_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
	return seqno;
}

static void blt_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	i915_gem_object_unpin(ring->private);
	drm_gem_object_unreference(ring->private);
	ring->private = NULL;
}

static const struct intel_ring_buffer gen6_blt_ring = {
	.name = "blt ring",
	.id = RING_BLT,
	.mmio_base = BLT_RING_BASE,
	.size = 32 * PAGE_SIZE,
	.init = blt_ring_init,
	.write_tail = ring_write_tail,
	.flush = blt_ring_flush,
	.add_request = blt_ring_add_request,
	.get_seqno = ring_status_page_get_seqno,
	.user_irq_get = blt_ring_get_user_irq,
	.user_irq_put = blt_ring_put_user_irq,
	.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
	.cleanup = blt_ring_cleanup,
};

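/*
 * Per-ring constructors: copy the matching static template into dev_priv and
 * let intel_init_ring_buffer() allocate the backing objects.
 */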
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->render_ring = render_ring;

	if (!I915_NEED_GFX_HWS(dev)) {
		dev_priv->render_ring.status_page.page_addr
			= dev_priv->status_page_dmah->vaddr;
		memset(dev_priv->render_ring.status_page.page_addr,
		       0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (IS_GEN6(dev))
		dev_priv->bsd_ring = gen6_bsd_ring;
	else
		dev_priv->bsd_ring = bsd_ring;

	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->blt_ring = gen6_blt_ring;

	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
}