/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;
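
/*
 * Register the /dev/kfd character device: allocate a dynamic major number,
 * create the "kfd" device class and a single device node at minor 0.
 */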
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
				   MKDEV(kfd_char_dev_major, 0),
				   NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}

static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = in_compat_syscall();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(filep);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if (kfd_is_locked())
		return -EAGAIN;

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}
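
/*
 * Validate the user-supplied queue arguments and translate them into a
 * struct queue_properties that the queue managers understand.
 */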
static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("Can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access eop buffer");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("Can't access ctx save restore buffer");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
	q_properties->ctl_stack_size = args->ctl_stack_size;
	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage: %d, %d\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority: %d, %d\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address: 0x%llX, 0x%llX\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size: 0x%llX, %u\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers: %px, %px\n",
			q_properties->read_ptr,
			q_properties->write_ptr);

	pr_debug("Queue Format: %d\n", q_properties->format);

	pr_debug("Queue EOP: 0x%llX\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area: 0x%llX\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}

static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("Creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("Looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (!dev) {
		pr_debug("Could not find gpu id 0x%x\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;
	if (KFD_IS_SOC15(dev->device_info->asic_family))
		/* On SOC15 ASICs, doorbell allocation must be
		 * per-device, and independent from the per-process
		 * queue_id. Return the doorbell offset within the
		 * doorbell aperture to user mode.
		 */
		args->doorbell_offset |= q_properties.doorbell_off;

	mutex_unlock(&p->mutex);

	pr_debug("Queue id %d was created successfully\n", args->queue_id);

	pr_debug("Ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("Read ptr address    == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("Write ptr address   == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("Destroying queue id %d for pasid %d\n",
				args->queue_id,
				p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("Queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("Queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("Can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("Ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("Updating queue id %d for pasid %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}
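
/*
 * The CU mask is a bitmap with one bit per compute unit, passed from user
 * space as an array of 32-bit words (hence num_cu_mask must be a multiple
 * of 32).
 */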
static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	const int max_num_cus = 1024;
	struct kfd_ioctl_set_cu_mask_args *args = data;
	struct queue_properties properties;
	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);

	if ((args->num_cu_mask % 32) != 0) {
		pr_debug("num_cu_mask 0x%x must be a multiple of 32",
				args->num_cu_mask);
		return -EINVAL;
	}

	properties.cu_mask_count = args->num_cu_mask;
	if (properties.cu_mask_count == 0) {
		pr_debug("CU mask cannot be 0");
		return -EINVAL;
	}

	/* To prevent an unreasonably large CU mask size, set an arbitrary
	 * limit of max_num_cus bits. We can then just drop any CU mask bits
	 * past max_num_cus bits and just use the first max_num_cus bits.
	 */
	if (properties.cu_mask_count > max_num_cus) {
		pr_debug("CU mask cannot be greater than 1024 bits");
		properties.cu_mask_count = max_num_cus;
		cu_mask_size = sizeof(uint32_t) * (max_num_cus / 32);
	}

	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
	if (!properties.cu_mask)
		return -ENOMEM;

	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
	if (retval) {
		pr_debug("Could not copy CU mask from userspace");
		kfree(properties.cu_mask);
		return -EFAULT;
	}

	mutex_lock(&p->mutex);

	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	if (retval)
		kfree(properties.cu_mask);

	return retval;
}

static int kfd_ioctl_get_queue_wave_state(struct file *filep,
					  struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_queue_wave_state_args *args = data;
	int r;

	mutex_lock(&p->mutex);

	r = pqm_get_wave_state(&p->pqm, args->queue_id,
			       (void __user *)args->ctl_stack_address,
			       &args->ctl_stack_used_size,
			       &args->save_area_used_size);

	mutex_unlock(&p->mutex);

	return r;
}

static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_set_trap_handler(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_trap_handler_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	if (dev->dqm->ops.set_trap_handler(dev->dqm,
					&pdd->qpd,
					args->tba_addr,
					args->tma_addr))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}

static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_register_args *args = data;
	struct kfd_dev *dev;
	struct kfd_dbgmgr *dbgmgr_ptr;
	struct kfd_process_device *pdd;
	bool create_ok;
	long status = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(&p->mutex);
	mutex_lock(kfd_get_dbgmgr_mutex());

	/*
	 * Make sure that we have a pdd, in case this is the first queue
	 * created for this process.
	 */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		status = PTR_ERR(pdd);
		goto out;
	}

	if (!dev->dbgmgr) {
		/* In case of a legal call, we have no dbgmgr yet */
		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
		if (create_ok) {
			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
			if (status != 0)
				kfd_dbgmgr_destroy(dbgmgr_ptr);
			else
				dev->dbgmgr = dbgmgr_ptr;
		}
	} else {
		pr_debug("debugger already registered\n");
		status = -EINVAL;
	}

out:
	mutex_unlock(kfd_get_dbgmgr_mutex());
	mutex_unlock(&p->mutex);

	return status;
}

static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_unregister_args *args = data;
	struct kfd_dev *dev;
	long status;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev || !dev->dbgmgr)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
	if (!status) {
		kfd_dbgmgr_destroy(dev->dbgmgr);
		dev->dbgmgr = NULL;
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}

/*
 * Parse and generate a variable-size data structure for address watch.
 * The total size of the buffer and the number of watch points are limited
 * in order to prevent kernel abuse (this has no bearing on the much smaller
 * HW limitation, which is enforced by the dbgdev module).
 * Please also note that the watch addresses themselves are not copied from
 * user, since they are set into the HW in user mode values.
 */
static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_address_watch_args *args = data;
	struct kfd_dev *dev;
	struct dbg_address_watch_info aw_info;
	unsigned char *args_buff;
	long status;
	void __user *cmd_from_user;
	uint64_t watch_mask_value = 0;
	unsigned int args_idx = 0;

	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	/* Validate arguments */

	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
		(cmd_from_user == NULL))
		return -EINVAL;

	/* this is the actual buffer to work with */
	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	aw_info.process = p;

	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(aw_info.num_watch_points);

	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;

	/*
	 * set watch address base pointer to point on the array base
	 * within args_buff
	 */
	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];

	/* skip over the addresses buffer */
	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		status = -EINVAL;
		goto out;
	}

	watch_mask_value = (uint64_t) args_buff[args_idx];

	if (watch_mask_value > 0) {
		/*
		 * There is an array of masks.
		 * set watch mask base pointer to point on the array base
		 * within args_buff
		 */
		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];

		/* skip over the masks buffer */
		args_idx += sizeof(aw_info.watch_mask) *
				aw_info.num_watch_points;
	} else {
		/* just the NULL mask, set to NULL and skip over it */
		aw_info.watch_mask = NULL;
		args_idx += sizeof(aw_info.watch_mask);
	}

	if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
		status = -EINVAL;
		goto out;
	}

	/* Currently HSA Event is not supported for DBG */
	aw_info.watch_event = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

out:
	kfree(args_buff);

	return status;
}

/* Parse and generate fixed size data structure for wave control */
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_wave_control_args *args = data;
	struct kfd_dev *dev;
	struct dbg_wave_control_info wac_info;
	unsigned char *args_buff;
	uint32_t computed_buff_size;
	long status;
	void __user *cmd_from_user;
	unsigned int args_idx = 0;

	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));

	/* we use compact form, independent of the packing attribute value */
	computed_buff_size = sizeof(*args) +
				sizeof(wac_info.mode) +
				sizeof(wac_info.operand) +
				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
				sizeof(wac_info.dbgWave_msg.MemoryVA) +
				sizeof(wac_info.trapId);

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	/* input size must match the computed "compact" size */
	if (args->buf_size_in_bytes != computed_buff_size) {
		pr_debug("size mismatch, computed : actual %u : %u\n",
				args->buf_size_in_bytes, computed_buff_size);
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	if (cmd_from_user == NULL)
		return -EINVAL;

	/* copy the entire buffer from user */

	args_buff = memdup_user(cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (IS_ERR(args_buff))
		return PTR_ERR(args_buff);

	/* move ptr to the start of the "pay-load" area */
	wac_info.process = p;

	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.operand);

	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.mode);

	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.trapId);

	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
					*((uint32_t *)(&args_buff[args_idx]));
	wac_info.dbgWave_msg.MemoryVA = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
			wac_info.process, wac_info.operand,
			wac_info.mode, wac_info.trapId,
			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);

	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	pr_debug("Returned status of dbg manager is %ld\n", status);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}

static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev)
		/* Reading GPU clock counter from KGD */
		args->gpu_clock_counter =
			dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
	else
		/* Node without GPU resource */
		args->gpu_clock_counter = 0;

	/* No access to rdtsc. Using raw monotonic time */
	args->cpu_clock_counter = ktime_get_raw_ns();
	args->system_clock_counter = ktime_get_boot_ns();

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}

static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/* if the process-device list isn't empty */
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;

			pdd = kfd_get_next_process_device_data(p, pdd);
		} while (pdd && (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}
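
/*
 * Two-step protocol: when called with num_of_nodes == 0, only the number of
 * available nodes is returned; a second call with a user buffer sized for
 * that many entries then receives the per-device aperture information.
 */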
static int kfd_ioctl_get_process_apertures_new(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_new_args *args = data;
	struct kfd_process_device_apertures *pa;
	struct kfd_process_device *pdd;
	uint32_t nodes = 0;
	int ret;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	if (args->num_of_nodes == 0) {
		/* Return number of nodes, so that user space can allocate
		 * sufficient memory
		 */
		mutex_lock(&p->mutex);

		if (!kfd_has_process_device_data(p))
			goto out_unlock;

		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			args->num_of_nodes++;
			pdd = kfd_get_next_process_device_data(p, pdd);
		} while (pdd);

		goto out_unlock;
	}

	/* Fill in process-aperture information for all available
	 * nodes, but not more than args->num_of_nodes as that is
	 * the amount of memory allocated by user
	 */
	pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
				args->num_of_nodes), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	mutex_lock(&p->mutex);

	if (!kfd_has_process_device_data(p)) {
		args->num_of_nodes = 0;
		kfree(pa);
		goto out_unlock;
	}

	/* Run over all pdd of the process */
	pdd = kfd_get_first_process_device_data(p);
	do {
		pa[nodes].gpu_id = pdd->dev->id;
		pa[nodes].lds_base = pdd->lds_base;
		pa[nodes].lds_limit = pdd->lds_limit;
		pa[nodes].gpuvm_base = pdd->gpuvm_base;
		pa[nodes].gpuvm_limit = pdd->gpuvm_limit;
		pa[nodes].scratch_base = pdd->scratch_base;
		pa[nodes].scratch_limit = pdd->scratch_limit;

		dev_dbg(kfd_device,
			"gpu id %u\n", pdd->dev->id);
		dev_dbg(kfd_device,
			"lds_base %llX\n", pdd->lds_base);
		dev_dbg(kfd_device,
			"lds_limit %llX\n", pdd->lds_limit);
		dev_dbg(kfd_device,
			"gpuvm_base %llX\n", pdd->gpuvm_base);
		dev_dbg(kfd_device,
			"gpuvm_limit %llX\n", pdd->gpuvm_limit);
		dev_dbg(kfd_device,
			"scratch_base %llX\n", pdd->scratch_base);
		dev_dbg(kfd_device,
			"scratch_limit %llX\n", pdd->scratch_limit);
		nodes++;

		pdd = kfd_get_next_process_device_data(p, pdd);
	} while (pdd && (nodes < args->num_of_nodes));
	mutex_unlock(&p->mutex);

	args->num_of_nodes = nodes;
	ret = copy_to_user(
			(void __user *)args->kfd_process_device_apertures_ptr,
			pa,
			(nodes * sizeof(struct kfd_process_device_apertures)));
	kfree(pa);
	return ret ? -EFAULT : 0;

out_unlock:
	mutex_unlock(&p->mutex);
	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	/* For dGPUs the event page is allocated in user mode. The
	 * handle is passed to KFD with the first call to this IOCTL
	 * through the event_page_offset field.
	 */
	if (args->event_page_offset) {
		struct kfd_dev *kfd;
		struct kfd_process_device *pdd;
		void *mem, *kern_addr;
		uint64_t size;

		if (p->signal_page) {
			pr_err("Event page is already set\n");
			return -EINVAL;
		}

		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
		if (!kfd) {
			pr_err("Getting device by id failed in %s\n", __func__);
			return -EINVAL;
		}

		mutex_lock(&p->mutex);
		pdd = kfd_bind_process_to_device(kfd, p);
		if (IS_ERR(pdd)) {
			err = PTR_ERR(pdd);
			goto out_unlock;
		}

		mem = kfd_process_device_translate_handle(pdd,
				GET_IDR_HANDLE(args->event_page_offset));
		if (!mem) {
			pr_err("Can't find BO, offset is 0x%llx\n",
			       args->event_page_offset);
			err = -EINVAL;
			goto out_unlock;
		}
		mutex_unlock(&p->mutex);

		err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
						mem, &kern_addr, &size);
		if (err) {
			pr_err("Failed to map event page to kernel\n");
			return err;
		}

		err = kfd_event_page_set(p, kern_addr, size);
		if (err) {
			pr_err("Failed to set event page\n");
			return err;
		}
	}

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	return err;

out_unlock:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;
	int err;

	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &args->wait_result);

	return err;
}

static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_scratch_backing_va_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	long err;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_fail;
	}

	pdd->qpd.sh_hidden_private_base = args->va_addr;

	mutex_unlock(&p->mutex);

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
	    pdd->qpd.vmid != 0)
		dev->kfd2kgd->set_scratch_backing_va(
			dev->kgd, args->va_addr, pdd->qpd.vmid);

	return 0;

bind_process_to_device_fail:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_get_tile_config(struct file *filep,
		struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_tile_config_args *args = data;
	struct kfd_dev *dev;
	struct tile_config config;
	int err = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	dev->kfd2kgd->get_tile_config(dev->kgd, &config);

	args->gb_addr_config = config.gb_addr_config;
	args->num_banks = config.num_banks;
	args->num_ranks = config.num_ranks;

	if (args->num_tile_configs > config.num_tile_configs)
		args->num_tile_configs = config.num_tile_configs;
	err = copy_to_user((void __user *)args->tile_config_ptr,
			config.tile_config_ptr,
			args->num_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_tile_configs = 0;
		return -EFAULT;
	}

	if (args->num_macro_tile_configs > config.num_macro_tile_configs)
		args->num_macro_tile_configs =
				config.num_macro_tile_configs;
	err = copy_to_user((void __user *)args->macro_tile_config_ptr,
			config.macro_tile_config_ptr,
			args->num_macro_tile_configs * sizeof(uint32_t));
	if (err) {
		args->num_macro_tile_configs = 0;
		return -EFAULT;
	}

	return 0;
}

static int kfd_ioctl_acquire_vm(struct file *filep, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_acquire_vm_args *args = data;
	struct kfd_process_device *pdd;
	struct kfd_dev *dev;
	struct file *drm_file;
	int ret;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	drm_file = fget(args->drm_fd);
	if (!drm_file)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		ret = -EINVAL;
		goto err_unlock;
	}

	if (pdd->drm_file) {
		ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
		goto err_unlock;
	}

	ret = kfd_process_device_init_vm(pdd, drm_file);
	if (ret)
		goto err_unlock;
	/* On success, the PDD keeps the drm_file reference */
	mutex_unlock(&p->mutex);

	return 0;

err_unlock:
	mutex_unlock(&p->mutex);
	fput(drm_file);
	return ret;
}
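
/*
 * A device is considered "large BAR" when it has no private (CPU-invisible)
 * local memory, i.e. all of its VRAM is host accessible. The debug_largebar
 * flag simulates this on machines that are not actually large BAR.
 */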
bool kfd_dev_is_large_bar(struct kfd_dev *dev)
{
	struct kfd_local_mem_info mem_info;

	if (debug_largebar) {
		pr_debug("Simulate large-bar allocation on non large-bar machine\n");
		return true;
	}

	if (dev->device_info->needs_iommu_device)
		return false;

	dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
	if (mem_info.local_mem_size_private == 0 &&
			mem_info.local_mem_size_public > 0)
		return true;
	return false;
}

static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_alloc_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_dev *dev;
	int idr_handle;
	long err;
	uint64_t offset = args->mmap_offset;
	uint32_t flags = args->flags;

	if (args->size == 0)
		return -EINVAL;

	dev = kfd_device_by_id(args->gpu_id);
	if (!dev)
		return -EINVAL;

	if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) &&
		(flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) &&
		!kfd_dev_is_large_bar(dev)) {
		pr_err("Alloc host visible vram on small bar is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto err_unlock;
	}

	err = dev->kfd2kgd->alloc_memory_of_gpu(
		dev->kgd, args->va_addr, args->size,
		pdd->vm, (struct kgd_mem **) &mem, &offset,
		flags);

	if (err)
		goto err_unlock;

	idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
	if (idr_handle < 0) {
		err = -EFAULT;
		goto err_free;
	}

	mutex_unlock(&p->mutex);

	args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
	args->mmap_offset = offset;

	return 0;

err_free:
	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
	mutex_unlock(&p->mutex);
	return err;
}

static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_free_memory_of_gpu_args *args = data;
	struct kfd_process_device *pdd;
	void *mem;
	struct kfd_dev *dev;
	int ret;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	mem = kfd_process_device_translate_handle(
		pdd, GET_IDR_HANDLE(args->handle));
	if (!mem) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);

	/* If freeing the buffer failed, leave the handle in place for
	 * clean-up during process tear-down.
	 */
	if (!ret)
		kfd_process_device_remove_obj_handle(
			pdd, GET_IDR_HANDLE(args->handle));

err_unlock:
	mutex_unlock(&p->mutex);
	return ret;
}
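
/*
 * Map a previously allocated buffer into the GPU VM of each device listed in
 * args->device_ids_array_ptr. args->n_success tracks how many devices have
 * already been mapped, so a failed call can be retried without redoing the
 * mappings that succeeded.
 */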
static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_map_memory_to_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_dev *dev, *peer;
	long err = 0;
	int i;
	uint32_t *devices_arr = NULL;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = PTR_ERR(pdd);
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (!peer) {
			pr_debug("Getting device by id failed for 0x%x\n",
				 devices_arr[i]);
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_bind_process_to_device(peer, p);
		if (IS_ERR(peer_pdd)) {
			err = PTR_ERR(peer_pdd);
			goto get_mem_obj_from_handle_failed;
		}
		err = peer->kfd2kgd->map_memory_to_gpu(
			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
		if (err) {
			pr_err("Failed to map to gpu %d/%d\n",
			       i, args->n_devices);
			goto map_memory_to_gpu_failed;
		}
		args->n_success = i + 1;
	}

	mutex_unlock(&p->mutex);

	err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *)mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Flush TLBs after waiting for the page table updates to complete */
	for (i = 0; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (WARN_ON_ONCE(!peer))
			continue;
		peer_pdd = kfd_get_process_device_data(peer, p);
		if (WARN_ON_ONCE(!peer_pdd))
			continue;
		kfd_flush_tlb(peer_pdd);
	}

	kfree(devices_arr);

	return err;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
map_memory_to_gpu_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
sync_memory_failed:
	kfree(devices_arr);

	return err;
}

static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_unmap_memory_from_gpu_args *args = data;
	struct kfd_process_device *pdd, *peer_pdd;
	void *mem;
	struct kfd_dev *dev, *peer;
	long err = 0;
	uint32_t *devices_arr = NULL, i;

	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
	if (!dev)
		return -EINVAL;

	if (!args->n_devices) {
		pr_debug("Device IDs array empty\n");
		return -EINVAL;
	}
	if (args->n_success > args->n_devices) {
		pr_debug("n_success exceeds n_devices\n");
		return -EINVAL;
	}

	devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr),
				    GFP_KERNEL);
	if (!devices_arr)
		return -ENOMEM;

	err = copy_from_user(devices_arr,
			     (void __user *)args->device_ids_array_ptr,
			     args->n_devices * sizeof(*devices_arr));
	if (err != 0) {
		err = -EFAULT;
		goto copy_from_user_failed;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		err = -EINVAL;
		goto bind_process_to_device_failed;
	}

	mem = kfd_process_device_translate_handle(pdd,
						GET_IDR_HANDLE(args->handle));
	if (!mem) {
		err = -ENOMEM;
		goto get_mem_obj_from_handle_failed;
	}

	for (i = args->n_success; i < args->n_devices; i++) {
		peer = kfd_device_by_id(devices_arr[i]);
		if (!peer) {
			err = -EINVAL;
			goto get_mem_obj_from_handle_failed;
		}

		peer_pdd = kfd_get_process_device_data(peer, p);
		if (!peer_pdd) {
			err = -ENODEV;
			goto get_mem_obj_from_handle_failed;
		}
		err = dev->kfd2kgd->unmap_memory_to_gpu(
			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
		if (err) {
			pr_err("Failed to unmap from gpu %d/%d\n",
			       i, args->n_devices);
			goto unmap_memory_from_gpu_failed;
		}
		args->n_success = i + 1;
	}
	kfree(devices_arr);

	mutex_unlock(&p->mutex);

	return 0;

bind_process_to_device_failed:
get_mem_obj_from_handle_failed:
unmap_memory_from_gpu_failed:
	mutex_unlock(&p->mutex);
copy_from_user_failed:
	kfree(devices_arr);
	return err;
}

#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
			    .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
			kfd_ioctl_dbg_wave_control, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_SCRATCH_BACKING_VA,
			kfd_ioctl_set_scratch_backing_va, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_TILE_CONFIG,
			kfd_ioctl_get_tile_config, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_TRAP_HANDLER,
			kfd_ioctl_set_trap_handler, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES_NEW,
			kfd_ioctl_get_process_apertures_new, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ACQUIRE_VM,
			kfd_ioctl_acquire_vm, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_MEMORY_OF_GPU,
			kfd_ioctl_alloc_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_FREE_MEMORY_OF_GPU,
			kfd_ioctl_free_memory_of_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_MAP_MEMORY_TO_GPU,
			kfd_ioctl_map_memory_to_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
			kfd_ioctl_unmap_memory_from_gpu, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
			kfd_ioctl_set_cu_mask, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
			kfd_ioctl_get_queue_wave_state, 0)
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
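
/*
 * Illustrative sketch (not part of this driver): user space reaches the
 * handlers above through the generic ioctl(2) path on the /dev/kfd node
 * created in kfd_chardev_init(), e.g.
 *
 *	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args ver = {0};
 *	int ret = ioctl(fd, AMDKFD_IOC_GET_VERSION, &ver);
 *
 * kfd_ioctl() below copies the argument struct in and/or out according to
 * the _IOC direction bits encoded in the command number and dispatches to
 * the matching entry in amdkfd_ioctls[].
 */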
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}
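
/*
 * The mmap offset encodes what is being mapped: it selects the mapping type
 * (doorbells, events or reserved memory) and, where relevant, the gpu_id of
 * the target device. kfd_ioctl_create_queue() returns such an encoded offset
 * in args->doorbell_offset for the doorbell case.
 */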
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;
	struct kfd_dev *dev = NULL;
	unsigned long vm_pgoff;
	unsigned int gpu_id;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	vm_pgoff = vma->vm_pgoff;
	vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
	gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
	if (gpu_id)
		dev = kfd_device_by_id(gpu_id);

	switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
	case KFD_MMAP_TYPE_DOORBELL:
		if (!dev)
			return -ENODEV;
		return kfd_doorbell_mmap(dev, process, vma);

	case KFD_MMAP_TYPE_EVENTS:
		return kfd_event_mmap(process, vma);

	case KFD_MMAP_TYPE_RESERVED_MEM:
		if (!dev)
			return -ENODEV;
		return kfd_reserved_mem_mmap(dev, process, vma);
	}

	return -EFAULT;
}