/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
# include "drmP.h"
# include "drm.h"
2011-03-09 16:22:19 +03:00
# include <linux/ktime.h>
# include <linux/hrtimer.h>
2009-12-11 12:24:15 +03:00
# include "nouveau_drv.h"
2010-09-22 02:58:54 +04:00
# include "nouveau_ramht.h"
2009-12-11 12:24:15 +03:00
# include "nouveau_dma.h"
2010-09-21 20:57:11 +04:00
# define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
2011-01-28 06:44:32 +03:00
# define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
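
/*
 * A fence marks a point in a channel's command stream.  Once the GPU
 * has processed the marker, every command submitted before it is known
 * to have completed.  Fences are reference counted and sit on the
 * channel's fence.pending list until they signal.
 */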
struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;

	void (*work)(void *priv, bool signalled);
	void *priv;
};

struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;
};
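
/* TTM passes fences around as opaque sync_obj pointers; recover the type. */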
static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	nouveau_channel_ref(NULL, &fence->channel);
	kfree(fence);
}
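
/*
 * Scan the pending list and signal every fence the GPU has already
 * reached, invoking any work callback attached to it.  Takes
 * chan->fence.lock itself.
 */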
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		sequence = fence->sequence;
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);

		if (sequence == chan->fence.sequence_ack)
			break;
	}
out:
	spin_unlock(&chan->fence.lock);
}
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	nouveau_channel_ref(chan, &fence->channel);

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref(&fence);
	*pfence = fence;
	return ret;
}

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}
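
/*
 * Assign the fence the channel's next sequence number, queue it on the
 * pending list, and write the sequence into the command stream so the
 * GPU reports it back once reached.
 */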
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
		else
			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
	} else {
		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

	return 0;
}
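
/*
 * Attach a callback to run when the fence signals.  If the fence has
 * already signalled, the callback runs immediately.
 */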
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	BUG_ON(fence->work);

	spin_lock(&fence->channel->fence.lock);

	if (fence->signalled) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}

	spin_unlock(&fence->channel->fence.lock);
}
void
__nouveau_fence_unref(void **sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(*sync_obj);

	if (fence)
		kref_put(&fence->refcount, nouveau_fence_del);
	*sync_obj = NULL;
}

void *
__nouveau_fence_ref(void *sync_obj)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);

	kref_get(&fence->refcount);
	return sync_obj;
}

bool
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
	struct nouveau_fence *fence = nouveau_fence(sync_obj);
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}
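
/*
 * Block until the fence signals or a 3 second timeout expires.  In
 * lazy mode the polling interval backs off exponentially, from 1us up
 * to a 1ms cap, trading wakeup latency for CPU time.
 */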
int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	unsigned long timeout = jiffies + (3 * DRM_HZ);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (1) {
		if (__nouveau_fence_signalled(sync_obj, sync_arg))
			break;

		if (time_after_eq(jiffies, timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE
					 : TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}
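
/*
 * Cross-channel synchronisation uses hardware semaphores: small cells
 * allocated from a shared VRAM heap that one channel releases and
 * another acquires.  semaphore_alloc() carves a cell out of that heap
 * and zeroes it.
 */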
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}
static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema =
		container_of(ref, struct nouveau_semaphore, ref);
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (unlikely(!signalled))
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}
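
/*
 * Emit commands that make 'chan' stall until the semaphore is
 * released.  The method sequence differs per generation: pre-NV84
 * chips go through the NvSema DMA object and software methods, NV84+
 * uses the 64-bit semaphore methods, and Fermi the NVC0 equivalents.
 */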
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
		OUT_RING(chan, NvSema);
		OUT_RING(chan, offset);
		OUT_RING(chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING(chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 1); /* ACQUIRE_EQ */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
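
/*
 * Emit commands that make 'chan' release (signal) the semaphore once
 * it reaches this point in its command stream.  Mirrors
 * semaphore_acquire() for each chipset generation.
 */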
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
		OUT_RING(chan, NvSema);
		OUT_RING(chan, offset);
		BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
		OUT_RING(chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
		OUT_RING(chan, chan->vram_handle);
		BEGIN_RING(chan, NvSubSw, 0x0010, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 2); /* RELEASE */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
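
/*
 * Make 'wchan' wait for a fence belonging to another channel.  On
 * hardware with semaphore support the wait happens entirely on the
 * GPU; otherwise, or when the other channel's mutex can't be taken
 * without risking lock-order inversion, fall back to a CPU wait.
 */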
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan = nouveau_fence_channel(fence);
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	if (likely(!chan || chan == wchan ||
		   nouveau_fence_signalled(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fallback to software sync to prevent locking
	 * order issues
	 */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}
int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}
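
/*
 * Per-channel setup: bind the NV_SW software object used for fences on
 * pre-Fermi hardware, and give the channel access to the shared
 * semaphore buffer, either through an NvSema DMA object (pre-NV84) or
 * by mapping the buffer into the channel's VM (newer chips).
 */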
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		/* Create an NV_SW object for various sync purposes */
		ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
		if (ret)
			return ret;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubSw, 0, 1);
		OUT_RING(chan, NvSw);
		FIRE_RING(chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	} else
	if (USE_SEMA(dev)) {
		/* map fence bo into channel's vm */
		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
					 &chan->fence.vma);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&chan->fence.pending);
	spin_lock_init(&chan->fence.lock);
	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->refcount, nouveau_fence_del);
	}

	spin_unlock(&chan->fence.lock);

	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}
int
nouveau_fence_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
	int ret;

	/* Create a shared VRAM heap for cross-channel sync. */
	if (USE_SEMA(dev)) {
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
				     0, 0, &dev_priv->fence.bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto fail;

		ret = nouveau_bo_map(dev_priv->fence.bo);
		if (ret)
			goto fail;

		ret = drm_mm_init(&dev_priv->fence.heap, 0,
				  dev_priv->fence.bo->bo.mem.size);
		if (ret)
			goto fail;

		spin_lock_init(&dev_priv->fence.lock);
	}

	return 0;
fail:
	nouveau_bo_unmap(dev_priv->fence.bo);
	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	return ret;
}
void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}