/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
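
/**
 * amdgpu_gem_object_create - create an amdgpu_bo and wrap it in a GEM object
 *
 * @adev: amdgpu device
 * @size: requested buffer size in bytes
 * @alignment: requested byte alignment
 * @initial_domain: preferred initial placement domain
 * @flags: AMDGPU_GEM_CREATE_* creation flags
 * @type: TTM buffer object type
 * @resv: reservation object to share, or NULL to get a private one
 * @obj: returned GEM object
 *
 * Returns 0 on success or a negative error code on failure.
 */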
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct dma_resv *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	*obj = NULL;

	bp.size = size;
	bp.byte_align = alignment;
	bp.type = type;
	bp.resv = resv;
	bp.preferred_domain = initial_domain;
	bp.flags = flags;
	bp.domain = initial_domain;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r)
		return r;

	*obj = &bo->tbo.base;
	(*obj)->funcs = &amdgpu_gem_object_funcs;

	return 0;
}
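
/**
 * amdgpu_gem_force_release - drop all GEM handles of remaining clients
 *
 * @adev: amdgpu device
 *
 * Called on teardown; warns if user space clients or their allocations are
 * still around and forcibly releases the remaining GEM objects.
 */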
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which is used by both the create and
 * open ioctl paths.
 */
static int amdgpu_gem_object_open(struct drm_gem_object *obj,
				  struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}
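
/*
 * Called when a handle owned by @file_priv is closed: drop this file's
 * bo_va reference and, once the last reference is gone, remove the VM
 * mapping and clear the freed page table entries.
 */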
static void amdgpu_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct dma_fence *fence = NULL;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	long r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.num_shared = 2;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%ld)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (!bo_va || --bo_va->ref_count)
		goto out_unlock;

	amdgpu_vm_bo_rmv(adev, bo_va);
	if (!amdgpu_vm_ready(vm))
		goto out_unlock;

	fence = dma_resv_get_excl(bo->tbo.base.resv);
	if (fence) {
		amdgpu_bo_fence(bo, fence, true);
		fence = NULL;
	}

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r || !fence)
		goto out_unlock;

	amdgpu_bo_fence(bo, fence, true);
	dma_fence_put(fence);

out_unlock:
	if (unlikely(r < 0))
		dev_err(adev->dev, "failed to clear page "
			"tables on GEM object close (%ld)\n", r);
	ttm_eu_backoff_reservation(&ticket, &list);
}

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
	.close = amdgpu_gem_object_close,
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
};

/*
 * GEM ioctls.
 */
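
/*
 * Create a buffer object for user space: validate the requested flags and
 * domains, allocate the BO and return a GEM handle. On VRAM allocation
 * failure the allocation is retried without CPU access or with GTT added
 * as a fallback domain.
 */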
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct dma_resv *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle, initial_domain;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
		      AMDGPU_GEM_CREATE_ENCRYPTED))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
		return -EINVAL;

	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
		return -EINVAL;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
			/* if gds bo is created from user space, it must be
			 * passed to bo list
			 */
			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
			return -EINVAL;
		}
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.base.resv;
	}

retry:
	initial_domain = (u32)(0xffffffff & args->in.domains);
	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     initial_domain,
				     flags, ttm_bo_type_device, resv, &gobj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
				  size, initial_domain, args->in.alignment, r);
		}
		return r;
	}

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
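
/*
 * Create a GEM object for a user pointer: wrap the user memory in a GTT
 * buffer object, optionally register an MMU notifier and pre-validate the
 * user pages, then return a handle for it.
 */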
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto user_pages_done;

		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto user_pages_done;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	if (r)
		goto user_pages_done;

	args->handle = handle;

user_pages_done:
	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
	drm_gem_object_put(gobj);

	return r;
}
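
/*
 * Look up a GEM handle and return the fake mmap offset of the backing
 * buffer object; userptr and NO_CPU_ACCESS objects cannot be mapped.
 */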
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}
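
/*
 * Thin ioctl wrapper around amdgpu_mode_dumb_mmap().
 */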
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned->signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
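
/*
 * Wait for all fences on the reservation object of a GEM object to signal,
 * up to the timeout requested by user space.
 */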
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
					timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put(gobj);
	return r;
}
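
/*
 * Get or set the tiling flags and opaque metadata blob attached to a
 * buffer object.
 */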
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_pdes(adev, vm, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT;

	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}
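
/*
 * Map, unmap, clear or replace a GPU virtual address range of a buffer
 * object in the caller's VM, after validating the requested address range
 * and flags.
 */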
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	uint64_t vm_size;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
	    args->va_address < AMDGPU_GMC_HOLE_END) {
		dev_dbg(dev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_GMC_HOLE_START,
			AMDGPU_GMC_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_GMC_HOLE_MASK;

	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
	vm_size -= AMDGPU_VA_RESERVED_SIZE;
	if (args->va_address + args->map_size > vm_size) {
		dev_dbg(dev->dev,
			"va_address 0x%llx is in top reserved area 0x%llx\n",
			args->va_address + args->map_size, vm_size);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(dev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			tv.num_shared = 1;
		else
			tv.num_shared = 0;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put(gobj);
	return r;
}
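
/*
 * Query the creation info of a buffer object or change its preferred
 * placement domains.
 */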
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_vm_bo_base *base;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->tbo.base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		for (base = robj->vm_bo; base; base = base->next)
			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
				r = -EINVAL;
				amdgpu_bo_unreserve(robj);
				goto out;
			}

		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put(gobj);
	return r;
}
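
/*
 * Create a dumb buffer suitable for scanout and return a handle to it.
 */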
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	uint32_t handle;
	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	u32 domain;
	int r;

	/*
	 * The buffer returned from this function should be cleared, but
	 * it can only be done if the ring is enabled or we'll fail to
	 * create the buffer.
	 */
	if (adev->mman.buffer_funcs_enabled)
		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
	domain = amdgpu_bo_get_preferred_pin_domain(adev,
				amdgpu_display_supported_domains(adev, flags));
	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
				     ttm_bo_type_device, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;
		struct drm_gem_object *gobj;
		int id;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, id) {
			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

			amdgpu_bo_print_info(id, bo, m);
		}
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
					ARRAY_SIZE(amdgpu_debugfs_gem_list));
#endif
	return 0;
}