/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

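/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu_bo
 *
 * @adev: amdgpu_device pointer
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s) for the buffer
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel-internal allocation
 * @obj: used to return the new GEM object
 *
 * Creates the backing buffer object.  If a pure VRAM allocation fails,
 * GTT is added to the allowed domains and the allocation is retried.
 */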
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

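/**
 * amdgpu_gem_force_release - drop GEM objects still held by user space
 *
 * @adev: amdgpu_device pointer
 *
 * Walks every open DRM file and releases any GEM objects that clients
 * still hold, warning about the leftover clients and allocations.
 */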
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which is used by both the GEM create
 * and the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */

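/**
 * amdgpu_gem_create_ioctl - create a GEM object from user space
 *
 * @dev: drm device
 * @data: ioctl arguments (union drm_amdgpu_gem_create)
 * @filp: drm file
 *
 * Validates the requested domains and flags, scales GDS/GWS/OA sizes by
 * their per-entry shift, rounds the size up to a whole page and returns a
 * handle to the newly created object.
 */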
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* reject invalid gem flags */
	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
				      AMDGPU_GEM_CREATE_SHADOW |
				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		r = -EINVAL;
		goto error_unlock;
	}
	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA)) {
		r = -EINVAL;
		goto error_unlock;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

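/**
 * amdgpu_gem_userptr_ioctl - create a GEM object from a user memory range
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_userptr)
 * @filp: drm file
 *
 * Wraps an anonymous, page-aligned user address range in a GTT buffer
 * object, optionally registering an MMU notifier and pre-validating the
 * backing pages.
 */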
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

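/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset for a GEM handle
 *
 * @filp: drm file
 * @dev: drm device
 * @handle: GEM handle
 * @offset_p: used to return the mmap offset
 *
 * Userptr objects and buffers created with NO_CPU_ACCESS cannot be mapped
 * and are rejected with -EPERM.
 */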
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned->signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

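/**
 * amdgpu_gem_wait_idle_ioctl - wait for a buffer object to become idle
 *
 * @dev: drm device
 * @data: ioctl arguments (union drm_amdgpu_gem_wait_idle)
 * @filp: drm file
 *
 * Waits on the buffer's reservation object for up to the requested timeout
 * and reports back whether the buffer is still busy.
 */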
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

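/**
 * amdgpu_gem_metadata_ioctl - get or set tiling flags and buffer metadata
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_metadata)
 * @filp: drm file
 */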
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
{
	/* if anything is swapped out don't swap it in here,
	   just abort and wait for the next CS */
	if (!amdgpu_bo_gpu_accessible(bo))
		return -ERESTARTSYS;

	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
		return -ERESTARTSYS;

	return 0;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map or unmap
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	struct ttm_validate_buffer *entry;
	int r = -ERESTARTSYS;

	list_for_each_entry(entry, list, head) {
		struct amdgpu_bo *bo =
			container_of(entry->bo, struct amdgpu_bo, tbo);
		if (amdgpu_gem_va_check(NULL, bo))
			goto error;
	}

	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
				      NULL);
	if (r)
		goto error;

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

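/**
 * amdgpu_gem_va_ioctl - map or unmap a buffer object in the process VM
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_va)
 * @filp: drm file
 *
 * Reserves the buffer and the page directory, performs the requested map
 * or unmap operation and, unless the update is delayed, updates the VM
 * right away.
 */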
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	uint64_t va_flags;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	if (!(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else {
		bo_va = fpriv->prt_va;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, bo_va, &list, args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

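/**
 * amdgpu_gem_op_ioctl - query creation info or change the preferred
 * placement of a buffer object
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_amdgpu_gem_op)
 * @filp: drm file
 */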
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

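/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: drm file
 * @dev: drm device
 * @args: dumb buffer parameters
 *
 * Allocates a CPU-accessible VRAM buffer with the pitch and size derived
 * from the requested width, height and bpp.
 */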
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;
	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}