/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
# include "drmP.h"
# include "drm.h"
2011-03-09 14:22:19 +01:00
# include <linux/ktime.h>
# include <linux/hrtimer.h>
2009-12-11 19:24:15 +10:00
# include "nouveau_drv.h"
2010-09-22 00:58:54 +02:00
# include "nouveau_ramht.h"
2012-04-30 13:30:00 +10:00
# include "nouveau_fence.h"
2012-04-30 11:33:43 -05:00
# include "nouveau_software.h"
2009-12-11 19:24:15 +10:00
# include "nouveau_dma.h"
2010-09-21 18:57:11 +02:00
# define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
2011-01-28 13:44:32 +10:00
# define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
2009-12-11 19:24:15 +10:00
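
/*
 * Fence completion is tracked per-channel with a monotonically
 * increasing sequence number.  Chipsets >= 0x10 (USE_REFCNT) report the
 * last completed sequence through a per-channel readback at offset 0x48;
 * older parts rely on a software counter updated from the IRQ handler.
 * Chipsets >= 0x17 (USE_SEMA) can additionally use semaphores in a
 * shared VRAM buffer for cross-channel synchronisation.
 */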

void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
		if (fence->sequence > chan->fence.sequence_ack)
			break;

		fence->channel = NULL;
		list_del(&fence->head);
		if (fence->work)
			fence->work(fence->priv, true);

		nouveau_fence_unref(&fence);
	}

out:
	spin_unlock(&chan->fence.lock);
}
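
/*
 * Emit a fence on a channel: allocate the next sequence number, put the
 * fence on the pending list, and write the sequence into the ring so the
 * GPU reports it back once everything submitted before it has completed.
 */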
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;
	fence->channel = chan;

	kref_get(&fence->kref);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->head, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		else
			BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
	} else {
		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);
	fence->timeout = jiffies + 3 * DRM_HZ;

	return 0;
}
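
/* A fence is done once nouveau_fence_update() has retired it from its
 * channel's pending list and cleared fence->channel. */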
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->channel)
		nouveau_fence_update(fence->channel);
	return !fence->channel;
}
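
/*
 * Wait for fence completion.  With "lazy" set, the loop sleeps via
 * hrtimers, doubling the sleep from 1us up to a 1ms cap per iteration;
 * without it the loop spins, which only makes sense for very short waits.
 */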
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
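
/* Fence lifetime is kref-managed; the pending list holds its own
 * reference, taken in nouveau_fence_emit() and dropped when the fence is
 * retired or the channel is torn down. */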
static void
nouveau_fence_del(struct kref *kref)
{
	struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
	kfree(fence);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		kref_put(&(*pfence)->kref, nouveau_fence_del);
	*pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->kref);

	if (chan) {
		ret = nouveau_fence_emit(fence, chan);
		if (ret)
			nouveau_fence_unref(&fence);
	}

	*pfence = fence;
	return ret;
}
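
/*
 * Typical fence lifecycle from a submission path (a minimal sketch;
 * error handling and surrounding locking elided).  nouveau_fence_new()
 * with a non-NULL channel both allocates and emits:
 *
 *	struct nouveau_fence *fence = NULL;
 *	int ret = nouveau_fence_new(chan, &fence);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, true, false);
 *		nouveau_fence_unref(&fence);
 *	}
 */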

struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;
};

void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	if (!fence->channel) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}
}
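
/*
 * Cross-channel sync is built on small semaphores sub-allocated from the
 * shared VRAM buffer set up in nouveau_fence_init().  A semaphore is
 * refcounted because both the acquiring and the releasing channel must
 * keep it alive until their fence work has run.
 */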
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}
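
/* Runs when the fence guarding a semaphore retires; if the fence was
 * torn down unsignalled, write the release value by hand so an acquirer
 * isn't left waiting forever. */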
static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema =
		container_of(ref, struct nouveau_semaphore, ref);
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (unlikely(!signalled))
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}
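
/*
 * Three generations of semaphore methods: pre-NV84 acquires through the
 * NvSema DMA object with a 32-bit offset, NV84+ addresses the buffer
 * through the channel's vm with a full 64-bit address, and NVC0+ drops
 * the DMA-object setup entirely.
 */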
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
		OUT_RING(chan, NvSema);
		OUT_RING(chan, offset);
		OUT_RING(chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING(chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 1); /* ACQUIRE_EQ */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
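
/* Mirror of semaphore_acquire(): emits the release method appropriate to
 * the channel's generation, writing 1 to the same semaphore location. */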
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
		OUT_RING(chan, NvSema);
		OUT_RING(chan, offset);
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
		OUT_RING(chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING(chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 2); /* RELEASE */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING(chan, upper_32_bits(offset));
		OUT_RING(chan, lower_32_bits(offset));
		OUT_RING(chan, 1);
		OUT_RING(chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}
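
/*
 * Make wchan wait for a fence emitted on another channel without blocking
 * the CPU: wchan acquires a semaphore that the fence's channel then
 * releases.  Falls back to a host-side wait when semaphores are
 * unavailable or the other channel's mutex can't be taken safely.
 */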
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan;
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
	if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fallback to software sync to prevent locking
	 * order issues
	 */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}
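
/* Per-channel setup: bind the software object used for sequence
 * writeback on pre-NVC0 hardware, then make the shared semaphore buffer
 * reachable, either through an NvSema DMA object (pre-NV84) or by
 * mapping it into the channel's vm. */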
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING(chan, NvSw);
		FIRE_RING(chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	} else
	if (USE_SEMA(dev)) {
		/* map fence bo into channel's vm */
		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
					 &chan->fence.vma);
		if (ret)
			return ret;
	}

	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}
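
/* Channel teardown: fail any still-pending fences so their work
 * callbacks see signalled == false, then drop this channel's mapping of
 * the shared fence buffer. */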
void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);
	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
		fence->channel = NULL;
		list_del(&fence->head);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->kref, nouveau_fence_del);
	}
	spin_unlock(&chan->fence.lock);

	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}
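
/* Global setup: allocate, pin and map the VRAM buffer backing all
 * semaphores, and initialise an allocator heap over it (4KiB pre-NV84,
 * 16KiB on newer chips, matching the 4- vs 16-byte semaphore size). */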
int
nouveau_fence_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
	int ret;

	/* Create a shared VRAM heap for cross-channel sync. */
	if (USE_SEMA(dev)) {
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->fence.bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto fail;

		ret = nouveau_bo_map(dev_priv->fence.bo);
		if (ret)
			goto fail;

		ret = drm_mm_init(&dev_priv->fence.heap, 0,
				  dev_priv->fence.bo->bo.mem.size);
		if (ret)
			goto fail;

		spin_lock_init(&dev_priv->fence.lock);
	}

	return 0;
fail:
	nouveau_bo_unmap(dev_priv->fence.bo);
	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}