/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"

#define nouveau_gem_pushbuf_sync(chan) 0

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}
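
/* Final GEM teardown: detach the GEM object from the nouveau_bo, drop any
 * leftover pin, and release the underlying TTM buffer object along with
 * the GEM object itself.
 */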
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (unlikely(nvbo->pin_refcnt)) {
		nvbo->pin_refcnt = 1;
		nouveau_bo_unpin(nvbo);
	}

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}
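
/* Per-client open hook: for clients with their own VM (fpriv->vm), make
 * sure the buffer has a mapping (nouveau_vma) in that VM, creating one on
 * first open and bumping its refcount on subsequent opens.
 */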
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
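
/* Per-client close hook: drop the client's reference on the buffer's VMA
 * and tear the mapping down once the refcount reaches zero.
 */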
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!fpriv->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			nouveau_bo_vma_del(nvbo, vma);
			kfree(vma);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
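
/* Allocate a new buffer object and wrap it in a GEM object.  The requested
 * domains are translated into TTM placement flags; on NV50+ the set of
 * domains the buffer may later be validated into is restricted to what was
 * asked for at creation time.
 */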
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
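
/* Fill in a drm_nouveau_gem_info struct for userspace: current domain,
 * GPU offset (the per-client VMA offset when a per-client VM is in use),
 * size, mmap handle and tiling state.
 */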
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (fpriv->vm) {
		vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = nvbo->bo.addr_space_offset;
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
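
/* DRM_NOUVEAU_GEM_NEW ioctl: validate the requested tile flags, allocate
 * the buffer, create a handle for the caller and report the buffer info
 * back in the request.
 */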
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
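
/* Choose TTM placement for a buffer based on the domains userspace asked
 * for.  Prefer wherever the buffer already resides if that is still an
 * acceptable domain, to avoid needless migration.
 */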
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}
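
/* Book-keeping for a single pushbuf validation pass: buffers are sorted
 * onto separate lists depending on which domains they are allowed in.
 */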
struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}
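
/* Look up and reserve every buffer on the pushbuf's buffer list.  If a
 * reservation would deadlock (-EAGAIN), back everything off, wait for the
 * contended buffer and start again.
 */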
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t sequence;
	int trycnt = 0;
	int ret, i;

	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(dev, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
		if (ret) {
			validate_fini(op, NULL);
			if (unlikely(ret == -EAGAIN))
				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
			drm_gem_object_unreference_unlocked(gem);
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_ERROR(dev, "fail reserve\n");
				return ret;
			}
			goto retry;
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			validate_fini(op, NULL);
			return -EINVAL;
		}
	}

	return 0;
}
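
/* Make the channel wait on any fence still attached to the buffer before
 * it touches the buffer's contents.
 */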
static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}
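
/* Validate each buffer on one list into an acceptable placement, syncing
 * with any previous user of the buffer before and after the move.  On
 * pre-NV50 chips, buffers that ended up somewhere other than userspace's
 * presumed location have their new offset/domain copied back and are
 * counted so the relocations get applied.
 */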
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct drm_device *dev = chan->dev;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail set_domain\n");
			return ret;
		}

		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
		ret = nouveau_bo_validate(nvbo, true, false, false);
		nvbo->channel = NULL;
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(dev, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(dev, "fail post-validate sync\n");
			return ret;
		}

		if (dev_priv->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}
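
/* Reserve and validate the complete buffer list for a pushbuf submission,
 * returning the number of buffers whose relocations need to be applied.
 */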
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct drm_device *dev = chan->dev;
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}
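
/* Copy a userspace array into a freshly allocated kernel buffer. */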
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	mem = kmalloc(nmemb * size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
		kfree(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
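
/* Apply userspace-supplied relocations: for each entry whose presumed
 * buffer location turned out to be wrong, compute the new value and write
 * it into the (kmapped) buffer that contains the relocation.
 */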
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(dev, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(dev, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(dev, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	kfree(reloc);
	return ret;
}
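
/* DRM_NOUVEAU_GEM_PUSHBUF ioctl: copy in the push/buffer/reloc arrays,
 * validate and relocate the buffer list, submit the pushes to the channel
 * and fence the lot.
 */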
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	chan = nouveau_channel_get(file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	req->vram_available = dev_priv->fb_aper_free;
	req->gart_available = dev_priv->gart_info.aper_free;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		nouveau_channel_put(&chan);
		return -EINVAL;
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push)) {
		nouveau_channel_put(&chan);
		return PTR_ERR(push);
	}

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		kfree(push);
		nouveau_channel_put(&chan);
		return PTR_ERR(bo);
	}

	/* Mark push buffers as being used on PFIFO, the validation code
	 * will then make sure that if the pushbuf bo moves, that they
	 * happen on the kernel channel, which will in turn cause a sync
	 * to happen before we try and submit the push buffer.
	 */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(dev, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}

		bo[push[i].bo_index].read_domains |= (1 << 31);
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(dev, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
		if (ret) {
			NV_ERROR(dev, "reloc apply: %d\n", ret);
			goto out;
		}
	}
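
	/* Submit the pushes using whichever mechanism the channel supports:
	 * the IB ring on newer chips, calls into the user buffers on >=nv25,
	 * or jumps into the user buffers (patched to jump back) on earlier
	 * chips.
	 */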
	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
		if (ret) {
			NV_INFO(dev, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (dev_priv->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(dev, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(dev, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
			uint32_t cmd;

			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
					push[i].offset) | 0x20000000);
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	kfree(bo);
	kfree(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (dev_priv->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	nouveau_channel_put(&chan);
	return ret;
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}
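
/* DRM_NOUVEAU_GEM_CPU_PREP ioctl: wait (or poll, with NOWAIT) for the
 * buffer to become idle before the CPU accesses it.
 */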
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}