/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
                       nouveau_private(dev)->card_type < NV_C0)
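
/*
 * Annotation (not in the original source): USE_REFCNT selects the
 * fence-completion mechanism.  NV10+ chips expose a per-channel
 * reference counter read back via nvchan_rd32(chan, 0x48); older chips
 * fall back to a software counter updated from an interrupt handler
 * (chan->fence.last_sequence_irq).  USE_SEMA gates the hardware
 * semaphore path used for cross-channel sync on NV17 up to, but not
 * including, Fermi (NV_C0).
 */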

struct nouveau_fence {
        struct nouveau_channel *channel;
        struct kref refcount;
        struct list_head entry;

        uint32_t sequence;
        bool signalled;

        void (*work)(void *priv, bool signalled);
        void *priv;
};

struct nouveau_semaphore {
        struct kref ref;
        struct drm_device *dev;
        struct drm_mm_node *mem;
};
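
/*
 * Annotation: each nouveau_semaphore owns a 4-byte slot (sema->mem)
 * carved out of the shared VRAM buffer dev_priv->fence.bo, which every
 * channel can reach through the NvSema DMA object set up in
 * nouveau_fence_channel_init().
 */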
2009-12-11 19:24:15 +10:00
static inline struct nouveau_fence *
nouveau_fence ( void * sync_obj )
{
return ( struct nouveau_fence * ) sync_obj ;
}
static void
nouveau_fence_del ( struct kref * ref )
{
struct nouveau_fence * fence =
container_of ( ref , struct nouveau_fence , refcount ) ;
2010-10-18 03:56:14 +02:00
nouveau_channel_ref ( NULL , & fence - > channel ) ;
2009-12-11 19:24:15 +10:00
kfree ( fence ) ;
}

void
nouveau_fence_update(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct nouveau_fence *tmp, *fence;
        uint32_t sequence;

        spin_lock(&chan->fence.lock);

        /* Fetch the last sequence if the channel is still up and running */
        if (likely(!list_empty(&chan->fence.pending))) {
                if (USE_REFCNT(dev))
                        sequence = nvchan_rd32(chan, 0x48);
                else
                        sequence = atomic_read(&chan->fence.last_sequence_irq);

                if (chan->fence.sequence_ack == sequence)
                        goto out;
                chan->fence.sequence_ack = sequence;
        }

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                sequence = fence->sequence;
                fence->signalled = true;
                list_del(&fence->entry);

                if (unlikely(fence->work))
                        fence->work(fence->priv, true);

                kref_put(&fence->refcount, nouveau_fence_del);

                if (sequence == chan->fence.sequence_ack)
                        break;
        }
out:
        spin_unlock(&chan->fence.lock);
}

int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
                  bool emit)
{
        struct nouveau_fence *fence;
        int ret = 0;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;
        kref_init(&fence->refcount);
        nouveau_channel_ref(chan, &fence->channel);

        if (emit)
                ret = nouveau_fence_emit(fence);

        if (ret)
                nouveau_fence_unref(&fence);
        *pfence = fence;
        return ret;
}
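
/*
 * Illustrative usage sketch (not part of this file): emit a fence on a
 * channel, block until it signals, then drop the reference.
 *
 *      struct nouveau_fence *fence = NULL;
 *      int ret = nouveau_fence_new(chan, &fence, true);
 *      if (ret == 0) {
 *              ret = nouveau_fence_wait(fence, true, false);
 *              nouveau_fence_unref(&fence);
 *      }
 */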

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
        return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}

int
nouveau_fence_emit(struct nouveau_fence *fence)
{
        struct nouveau_channel *chan = fence->channel;
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        ret = RING_SPACE(chan, 2);
        if (ret)
                return ret;

        if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
                nouveau_fence_update(chan);

                BUG_ON(chan->fence.sequence ==
                       chan->fence.sequence_ack - 1);
        }

        fence->sequence = ++chan->fence.sequence;

        kref_get(&fence->refcount);
        spin_lock(&chan->fence.lock);
        list_add_tail(&fence->entry, &chan->fence.pending);
        spin_unlock(&chan->fence.lock);

        if (USE_REFCNT(dev)) {
                if (dev_priv->card_type < NV_C0)
                        BEGIN_RING(chan, NvSubSw, 0x0050, 1);
                else
                        BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
        } else {
                BEGIN_RING(chan, NvSubSw, 0x0150, 1);
        }
        OUT_RING(chan, fence->sequence);
        FIRE_RING(chan);

        return 0;
}
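
/*
 * Annotation: the method emitted above makes the GPU report
 * fence->sequence when it reaches this point in the ring: on NV10+ the
 * 0x0050 method updates the channel reference counter that
 * nouveau_fence_update() reads back at offset 0x48, while on older
 * chips the software method 0x0150 is trapped by the driver's
 * interrupt handler, which records the value in last_sequence_irq.
 * The BUG_ON fires only if the 32-bit sequence space fills up with
 * unacknowledged fences.
 */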

void
nouveau_fence_work(struct nouveau_fence *fence,
                   void (*work)(void *priv, bool signalled),
                   void *priv)
{
        BUG_ON(fence->work);

        spin_lock(&fence->channel->fence.lock);

        if (fence->signalled) {
                work(priv, true);
        } else {
                fence->work = work;
                fence->priv = priv;
        }

        spin_unlock(&fence->channel->fence.lock);
}
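
/*
 * Illustrative usage sketch (not part of this file; my_release and data
 * are made-up names): attach a one-shot callback that runs when the
 * fence completes, or immediately if it already has.  "signalled" is
 * false if the channel was torn down before the fence signalled.
 *
 *      static void my_release(void *priv, bool signalled)
 *      {
 *              kfree(priv);
 *      }
 *
 *      nouveau_fence_work(fence, my_release, data);
 */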

void
__nouveau_fence_unref(void **sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(*sync_obj);

        if (fence)
                kref_put(&fence->refcount, nouveau_fence_del);
        *sync_obj = NULL;
}

void *
__nouveau_fence_ref(void *sync_obj)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);

        kref_get(&fence->refcount);
        return sync_obj;
}

bool
__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
        struct nouveau_fence *fence = nouveau_fence(sync_obj);
        struct nouveau_channel *chan = fence->channel;

        if (fence->signalled)
                return true;

        nouveau_fence_update(chan);
        return fence->signalled;
}

int
__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
        unsigned long timeout = jiffies + (3 * DRM_HZ);
        unsigned long sleep_time = jiffies + 1;
        int ret = 0;

        while (1) {
                if (__nouveau_fence_signalled(sync_obj, sync_arg))
                        break;

                if (time_after_eq(jiffies, timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE
                                         : TASK_UNINTERRUPTIBLE);
                if (lazy && time_after_eq(jiffies, sleep_time))
                        schedule_timeout(1);

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);

        return ret;
}
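
/*
 * Annotation: the loop above polls __nouveau_fence_signalled() with a
 * 3 second timeout (-EBUSY on expiry).  With lazy=true it busy-waits
 * for roughly one jiffy and then backs off to one-tick sleeps; with
 * intr=true a pending signal aborts the wait with -ERESTARTSYS.
 */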

static struct nouveau_semaphore *
alloc_semaphore(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_semaphore *sema;
        int ret;

        if (!USE_SEMA(dev))
                return NULL;

        sema = kmalloc(sizeof(*sema), GFP_KERNEL);
        if (!sema)
                goto fail;

        ret = drm_mm_pre_get(&dev_priv->fence.heap);
        if (ret)
                goto fail;

        spin_lock(&dev_priv->fence.lock);
        sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
        if (sema->mem)
                sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0);
        spin_unlock(&dev_priv->fence.lock);

        if (!sema->mem)
                goto fail;

        kref_init(&sema->ref);
        sema->dev = dev;
        nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);

        return sema;
fail:
        kfree(sema);
        return NULL;
}
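
/*
 * Annotation: drm_mm_pre_get() preallocates free-list nodes so that the
 * drm_mm_search_free()/drm_mm_get_block_atomic() pair can run under
 * dev_priv->fence.lock (a spinlock) without allocating memory there.
 */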

static void
free_semaphore(struct kref *ref)
{
        struct nouveau_semaphore *sema =
                container_of(ref, struct nouveau_semaphore, ref);
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        spin_lock(&dev_priv->fence.lock);
        drm_mm_put_block(sema->mem);
        spin_unlock(&dev_priv->fence.lock);

        kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
        struct nouveau_semaphore *sema = priv;
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

        if (unlikely(!signalled))
                nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

        kref_put(&sema->ref, free_semaphore);
}
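
/*
 * Annotation: if the fence carrying this work item never signals
 * (signalled == false, i.e. the channel died first), the semaphore is
 * manually written to 1 so that any channel blocked in ACQUIRE on it
 * can still make progress before the slot is freed.
 */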

static int
emit_semaphore(struct nouveau_channel *chan, int method,
               struct nouveau_semaphore *sema)
{
        struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
        struct nouveau_fence *fence;
        bool smart = (dev_priv->card_type >= NV_50);
        int ret;

        ret = RING_SPACE(chan, smart ? 8 : 4);
        if (ret)
                return ret;

        if (smart) {
                BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
                OUT_RING(chan, NvSema);
        }
        BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
        OUT_RING(chan, sema->mem->start);

        if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
                /*
                 * NV50 tries to be too smart and context-switch
                 * between semaphores instead of doing a "first come,
                 * first served" strategy like previous cards
                 * do.
                 *
                 * That's bad because the ACQUIRE latency can get as
                 * large as the PFIFO context time slice in the
                 * typical DRI2 case where you have several
                 * outstanding semaphores at the same moment.
                 *
                 * If we're going to ACQUIRE, force the card to
                 * context switch before, just in case the matching
                 * RELEASE is already scheduled to be executed in
                 * another channel.
                 */
                BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
                OUT_RING(chan, 0);
        }

        BEGIN_RING(chan, NvSubSw, method, 1);
        OUT_RING(chan, 1);

        if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
                /*
                 * Force the card to context switch, there may be
                 * another channel waiting for the semaphore we just
                 * released.
                 */
                BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
                OUT_RING(chan, 0);
        }

        /* Delay semaphore destruction until its work is done */
        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        kref_get(&sema->ref);
        nouveau_fence_work(fence, semaphore_work, sema);
        nouveau_fence_unref(&fence);

        return 0;
}
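
/*
 * Annotation: nouveau_fence_sync() below pairs the two methods.  The
 * waiting channel (wchan) emits an ACQUIRE, and the fence's own channel
 * emits a RELEASE behind its already-queued work, so wchan stalls on
 * the GPU until the fence's channel catches up.  If no semaphore can be
 * used (pre-NV17 hardware, allocation failure, or mutex contention), it
 * falls back to a CPU-side nouveau_fence_wait().
 */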

int
nouveau_fence_sync(struct nouveau_fence *fence,
                   struct nouveau_channel *wchan)
{
        struct nouveau_channel *chan = nouveau_fence_channel(fence);
        struct drm_device *dev = wchan->dev;
        struct nouveau_semaphore *sema;
        int ret = 0;

        if (likely(!chan || chan == wchan ||
                   nouveau_fence_signalled(fence)))
                goto out;

        sema = alloc_semaphore(dev);
        if (!sema) {
                /* Early card or broken userspace, fall back to
                 * software sync. */
                ret = nouveau_fence_wait(fence, true, false);
                goto out;
        }

        /* try to take chan's mutex, if we can't take it right away
         * we have to fallback to software sync to prevent locking
         * order issues
         */
        if (!mutex_trylock(&chan->mutex)) {
                ret = nouveau_fence_wait(fence, true, false);
                goto out_unref;
        }

        /* Make wchan wait until it gets signalled */
        ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
        if (ret)
                goto out_unlock;

        /* Signal the semaphore from chan */
        ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
out_unlock:
        mutex_unlock(&chan->mutex);
out_unref:
        kref_put(&sema->ref, free_semaphore);
out:
        if (chan)
                nouveau_channel_put_unlocked(&chan);
        return ret;
}

int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
        return 0;
}

int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        /* Create an NV_SW object for various sync purposes */
        ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
        if (ret)
                return ret;

        /* we leave subchannel empty for nvc0 */
        if (dev_priv->card_type < NV_C0) {
                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubSw, 0, 1);
                OUT_RING(chan, NvSw);
        }

        /* Create a DMA object for the shared cross-channel sync area. */
        if (USE_SEMA(dev)) {
                struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size, NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (ret)
                        return ret;

                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                if (ret)
                        return ret;

                ret = RING_SPACE(chan, 2);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
                OUT_RING(chan, NvSema);
        }

        FIRE_RING(chan);

        INIT_LIST_HEAD(&chan->fence.pending);
        spin_lock_init(&chan->fence.lock);
        atomic_set(&chan->fence.last_sequence_irq, 0);

        return 0;
}

void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
        struct nouveau_fence *tmp, *fence;

        spin_lock(&chan->fence.lock);

        list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
                fence->signalled = true;
                list_del(&fence->entry);

                if (unlikely(fence->work))
                        fence->work(fence->priv, false);

                kref_put(&fence->refcount, nouveau_fence_del);
        }

        spin_unlock(&chan->fence.lock);
}

int
nouveau_fence_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        /* Create a shared VRAM heap for cross-channel sync. */
        if (USE_SEMA(dev)) {
                ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, false, true, &dev_priv->fence.bo);
                if (ret)
                        return ret;

                ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
                if (ret)
                        goto fail;

                ret = nouveau_bo_map(dev_priv->fence.bo);
                if (ret)
                        goto fail;

                ret = drm_mm_init(&dev_priv->fence.heap, 0,
                                  dev_priv->fence.bo->bo.mem.size);
                if (ret)
                        goto fail;

                spin_lock_init(&dev_priv->fence.lock);
        }

        return 0;
fail:
        nouveau_bo_unmap(dev_priv->fence.bo);
        nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (USE_SEMA(dev)) {
                drm_mm_takedown(&dev_priv->fence.heap);
                nouveau_bo_unmap(dev_priv->fence.bo);
                nouveau_bo_unpin(dev_priv->fence.bo);
                nouveau_bo_ref(NULL, &dev_priv->fence.bo);
        }
}