/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
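
/*
 * GEM object destructor.  Runtime-resumes the device, tears down any
 * PRIME import attachment, then drops the final TTM reference on the
 * underlying buffer object.
 */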
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
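
/*
 * Called for each client (drm_file) that opens a handle to this object.
 * On NV50-and-newer VMMs this creates (or takes a reference on) the
 * client's virtual mapping of the buffer; earlier chips address buffers
 * directly, so no mapping is needed and we return early.
 */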
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(dev);
		goto out;
	}

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}
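
/*
 * VMA teardown may have to wait for work still in flight on the GPU.
 * If the VMA is idle it is deleted immediately; otherwise deletion is
 * queued on the client workqueue to run once the VMA's fence signals.
 * If the work item can't be allocated, fall back to a time-limited
 * synchronous wait on the fence before deleting anyway.
 */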
struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
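
/*
 * Called when a client closes its handle.  Drops the client's VMA
 * reference and, when the last reference goes away, unmaps the buffer
 * from the client's address space.
 */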
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
			}
			pm_runtime_put_autosuspend(dev);
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}
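
/*
 * Allocate a new buffer object and initialise its embedded GEM object.
 * The requested NOUVEAU_GEM_DOMAIN_* mask is translated into TTM
 * placement flags before allocation.
 */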
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possibly on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;

	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
	*pnvbo = nvbo;
	return 0;
}
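
/*
 * Fill out the userspace info structure for a buffer: current domain,
 * GPU offset (the per-client virtual address on NV50+), size, mmap
 * handle and tiling state.
 */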
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&nvbo->bo.base);
	return ret;
}
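
/*
 * Work out a TTM placement for a buffer from the domains userspace
 * declared valid for this submission, preferring wherever the buffer
 * currently resides to avoid a needless migration.
 */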
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}
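
/*
 * State for a single validation pass: the list of reserved buffers and
 * the ww-mutex acquire context used to lock them deadlock-free.
 */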
struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put_unlocked(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
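
/*
 * Look up and reserve every buffer on the pushbuf's validation list.
 * Reservation uses a ww acquire context; on -EDEADLK the contended
 * buffer is reserved via the slowpath and the whole loop restarts.
 * Buffers are binned into VRAM, GART or either-placement lists
 * according to their valid domains.
 */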
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}
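
/*
 * Place and validate every reserved buffer, synchronise against other
 * users of each buffer, and (pre-TESLA) refresh userspace's presumed
 * offsets when a buffer moved.  Returns the number of relocations that
 * need re-applying, or a negative error code.
 */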
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}
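
/*
 * Top-level validation: reserve (validate_init) then place and fence
 * (validate_list) every buffer referenced by the pushbuf.  On success,
 * *apply_relocs says whether any relocations must be rewritten.
 */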
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}
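
/*
 * Copy a userspace array into a kernel buffer allocated with kvmalloc,
 * so that large buffer/reloc lists can fall back to vmalloc.  The size
 * multiplication is not overflow-checked here; callers bound nmemb via
 * the NOUVEAU_GEM_MAX_* limits first.
 */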
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
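
/*
 * Rewrite presumed GPU offsets inside the pushbuf for every buffer
 * whose placement changed during validation.  Each reloc patches a
 * 32-bit word (low or high half of the new offset, optionally OR'd
 * with domain-specific bits) through a CPU kmap of the target buffer.
 */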
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}
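
/*
 * Main submission ioctl.  Copies in the push/buffer arrays, validates
 * all buffers, and applies relocations if needed; the reloc array is
 * only copied in after dropping reservations (then validation is
 * redone), since the copy may fault while buffers are reserved.  The
 * pushes are then emitted via the channel's indirect buffer on NV50+,
 * call methods on chipset >= 0x25, or jump commands on older chips,
 * and the submission is fenced.
 */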
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
		u_free(reloc);
	}
out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
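
/*
 * CPU access ioctls: cpu_prep waits (up to 30s, or not at all with
 * NOWAIT) for pending GPU work on the buffer and syncs it for CPU
 * access; cpu_fini hands the buffer back to the device.
 */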
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put_unlocked(gem);

	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put_unlocked(gem);
	return ret;
}