/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
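
/*
 * GEM object destructor: called when the last reference to the GEM
 * object is dropped.  Wakes the device via runtime PM, tears down any
 * PRIME import attachment, and releases the underlying TTM buffer.
 */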
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	drm_gem_object_release(gem);

	/* reset filp so nouveau_bo_del_ttm() can test for it */
	gem->filp = NULL;
	ttm_bo_unref(&bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
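
/*
 * Called when a client opens a handle to this object.  On chipsets
 * with per-client virtual memory (cli->vm set), map the buffer into
 * the client's address space, or take another reference on an
 * existing mapping.
 */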
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_vma *vma;
	struct device *dev = drm->dev->dev;
	int ret;

	if (!cli->vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			kfree(vma);
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
		if (ret)
			kfree(vma);

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
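
/*
 * Tear down a per-client mapping.  If the buffer is mapped and still
 * has fences attached, the unmap is deferred to fence-signal time via
 * nouveau_fence_work() (with nouveau_gem_object_delete() as the
 * callback); otherwise it is unmapped and freed immediately.
 */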
static void
nouveau_gem_object_delete(void *data)
{
	struct nvkm_vma *vma = data;
	nvkm_vm_unmap(vma);
	nvkm_vm_put(vma);
	kfree(vma);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct reservation_object *resv = nvbo->bo.resv;
	struct reservation_object_list *fobj;
	struct fence *fence = NULL;

	fobj = reservation_object_get_list(resv);

	list_del(&vma->head);

	if (fobj && fobj->shared_count > 1)
		ttm_bo_wait(&nvbo->bo, true, false, false);
	else if (fobj && fobj->shared_count == 1)
		fence = rcu_dereference_protected(fobj->shared[0],
						reservation_object_held(resv));
	else
		fence = reservation_object_get_excl(nvbo->bo.resv);

	if (fence && mapped) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		kfree(vma);
	}
}
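
/*
 * Counterpart to nouveau_gem_object_open(): drop the client's
 * reference on its mapping and unmap once the last reference is gone.
 */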
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nvkm_vma *vma;
	int ret;

	if (!cli->vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->vm);
	if (vma) {
		if (--vma->refcount == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
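
/*
 * Allocate a new buffer object and initialise its embedded GEM object.
 * The requested GEM domains are translated into TTM placement flags.
 */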
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	/* Initialize the embedded gem-object.  We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
	if (ret) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
	return 0;
}
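
/*
 * Fill in a drm_nouveau_gem_info reply: current domain, GPU virtual
 * address (or linear offset), size, mmap handle and tiling state.
 */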
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nvkm_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (cli->vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}
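
/*
 * DRM_IOCTL_NOUVEAU_GEM_NEW: validate the requested memory type,
 * create the buffer, and hand a GEM handle plus object info back to
 * userspace.  The allocation reference is dropped once the handle
 * holds its own.
 */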
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	if (!nvkm_fb_memtype_valid(fb, req->info.tile_flags)) {
		NV_PRINTK(err, cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}
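
/*
 * Work out a TTM placement for a pushbuf buffer: prefer wherever the
 * buffer currently resides if that satisfies the requested domains,
 * to avoid needless migration.
 */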
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};
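
/*
 * Release every buffer on the validate list: attach the submission
 * fence (if any), drop kmaps taken for relocations, unreserve the
 * buffers, and drop the GEM references taken during lookup.
 */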
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, &op->ticket);
		drm_gem_object_unreference_unlocked(&nvbo->gem);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
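
/*
 * Look up and reserve every buffer on the pushbuf's validation list,
 * using a ww_mutex acquire context so that -EDEADLK backoff can
 * restart the whole sequence without deadlocking against other
 * submitters.  Buffers are bucketed by allowed domain (VRAM, GART,
 * or both).
 */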
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, NULL, NULL);
	return ret;
}
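
/*
 * Validate each reserved buffer into an allowed placement and sync
 * with any outstanding fences.  On pre-Tesla chips, if a buffer did
 * not end up where userspace presumed, update the presumed
 * offset/domain and count it so relocations get applied later.
 */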
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
					 &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}
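
/*
 * Top-level validation for a pushbuf submission: reserve the buffer
 * list, validate placements, and report how many buffers need their
 * relocations applied.
 */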
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}
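
/*
 * Helpers for copying variable-sized userspace arrays: u_memcpya()
 * copies nmemb elements of the given size into a kernel buffer
 * (kmalloc with a vmalloc fallback) and returns an ERR_PTR() on
 * failure; u_free() releases either allocation.
 */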
static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
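
/*
 * Apply userspace-supplied relocations: patch 32-bit words inside the
 * push buffers with the low/high parts of buffer offsets (optionally
 * OR-ing in domain-dependent bits) once the target buffer is idle.
 */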
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* valid indices are 0..nr_buffers-1, so reject == as well */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, true, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}
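
/*
 * DRM_IOCTL_NOUVEAU_GEM_PUSHBUF: the main command-submission path.
 * The request is bounds-checked, the buffer list validated, and any
 * relocations applied; the pushes are then queued on the channel via
 * the IB ring where available (chan->dma.ib_max), or on older
 * chipsets by emitting call/jump commands into the DMA ring, and the
 * whole submission is fenced.
 */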
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence, bo);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
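
/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_PREP: wait for GPU access to finish (up
 * to 30 seconds, or just poll the fences if NOWAIT is set), then sync
 * the buffer for CPU access.
 */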
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	if (no_wait)
		ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
	else {
		long lret;

		lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
		if (!lret)
			ret = -EBUSY;
		else if (lret > 0)
			ret = 0;
		else
			ret = lret;
	}
	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_unreference_unlocked(gem);

	return ret;
}
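
/*
 * DRM_IOCTL_NOUVEAU_GEM_CPU_FINI: end of a CPU access window; sync
 * the buffer back for device access.
 */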
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_unreference_unlocked(gem);
	return 0;
}
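
/*
 * DRM_IOCTL_NOUVEAU_GEM_INFO: look up a handle and return the
 * buffer's current placement, offset, size and tiling state.
 */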
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}