/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
					MKDEV(kfd_char_dev_major, 0),
					NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}
static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = is_compat_task();
	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}
static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access eop buffer\n");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access ctx save restore buffer\n");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage (%d, %d)\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority (%d, %d)\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address (0x%llX, 0x%llX)\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size (0x%llX, %u)\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
			(uint64_t) q_properties->read_ptr,
			(uint64_t) q_properties->write_ptr);

	pr_debug("Queue Format (%d)\n", q_properties->format);

	pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area (0x%llX)\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}
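/*
 * Illustrative sketch (editorial, not part of this driver): a userspace
 * client would populate struct kfd_ioctl_create_queue_args roughly as
 * follows before issuing AMDKFD_IOC_CREATE_QUEUE. The concrete values
 * (ring size, priority, variable names) are assumptions for the example,
 * not requirements imposed by this file:
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	args.queue_percentage = 100;
 *	args.queue_priority = 7;
 *	args.ring_base_address = (uint64_t)ring_buf;
 *	args.ring_size = 4096;
 *	args.read_pointer_address = (uint64_t)&rptr;
 *	args.write_pointer_address = (uint64_t)&wptr;
 *
 *	err = ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args);
 */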
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("kfd: creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL) {
		pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
				0, q_properties.type, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;

	mutex_unlock(&p->mutex);

	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);

	pr_debug("ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}
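/*
 * Illustrative sketch (an assumption, not part of this file): userspace
 * maps the doorbell page for the new queue by handing the doorbell_offset
 * returned by the ioctl straight to mmap() on the same /dev/kfd fd:
 *
 *	doorbell = mmap(NULL, doorbell_page_size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, kfd_fd, args.doorbell_offset);
 *
 * kfd_mmap() at the bottom of this file recognizes the
 * KFD_MMAP_DOORBELL_MASK bit encoded into the offset above and routes
 * the mapping to kfd_doorbell_mmap().
 */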
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("kfd: destroying queue id %d for PASID %d\n",
			args->queue_id,
			p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("kfd: updating queue id %d for PASID %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}
static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}
static int kfd_ioctl_dbg_register(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_register_args *args = data;
	struct kfd_dev *dev;
	struct kfd_dbgmgr *dbgmgr_ptr;
	struct kfd_process_device *pdd;
	bool create_ok;
	long status = 0;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());
	mutex_lock(&p->mutex);

	/*
	 * make sure we have a pdd, in case this is the first queue created
	 * for this process
	 */
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		mutex_unlock(&p->mutex);
		mutex_unlock(kfd_get_dbgmgr_mutex());
		return PTR_ERR(pdd);
	}

	if (dev->dbgmgr == NULL) {
		/* In case of a legal call, we have no dbgmgr yet */
		create_ok = kfd_dbgmgr_create(&dbgmgr_ptr, dev);
		if (create_ok) {
			status = kfd_dbgmgr_register(dbgmgr_ptr, p);
			if (status != 0)
				kfd_dbgmgr_destroy(dbgmgr_ptr);
			else
				dev->dbgmgr = dbgmgr_ptr;
		}
	} else {
		pr_debug("debugger already registered\n");
		status = -EINVAL;
	}

	mutex_unlock(&p->mutex);
	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}
static int kfd_ioctl_dbg_unregister(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_unregister_args *args = data;
	struct kfd_dev *dev;
	long status;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
		return -EINVAL;
	}

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_unregister(dev->dbgmgr, p);
	if (status == 0) {
		kfd_dbgmgr_destroy(dev->dbgmgr);
		dev->dbgmgr = NULL;
	}

	mutex_unlock(kfd_get_dbgmgr_mutex());

	return status;
}
/*
 * Parse and generate a variable-size data structure for an address watch.
 * The total size of the buffer and the number of watch points are limited
 * in order to prevent kernel abuse. (This has no bearing on the much
 * smaller HW limitation, which is enforced by the dbgdev module.)
 * Please also note that the watch addresses themselves are not "copied
 * from user", since they are set into the HW with user-mode values.
 */
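/*
 * For reference, the compact layout the parsing below assumes for the
 * user buffer at args->content_ptr (editorial diagram derived from the
 * code that follows; offsets are cumulative):
 *
 *	+-------------------------+  args_idx = 0
 *	| num_watch_points (u32)  |
 *	+-------------------------+  += sizeof(u32)
 *	| watch_mode[]            |  one enum HSA_DBG_WATCH_MODE per point
 *	+-------------------------+  += sizeof(enum) * num_watch_points
 *	| watch_address[]         |  one u64 per point
 *	+-------------------------+  += sizeof(u64) * num_watch_points
 *	| watch_mask[]            |  one u64 per point, or a single NULL
 *	+-------------------------+  mask when the first byte here is zero
 */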
static int kfd_ioctl_dbg_address_watch(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_address_watch_args *args = data;
	struct kfd_dev *dev;
	struct dbg_address_watch_info aw_info;
	unsigned char *args_buff;
	long status;
	void __user *cmd_from_user;
	uint64_t watch_mask_value = 0;
	unsigned int args_idx = 0;

	memset((void *) &aw_info, 0, sizeof(struct dbg_address_watch_info));

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_address_watch not supported on CZ\n");
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	/* Validate arguments */
	if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
		(args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
		(cmd_from_user == NULL))
		return -EINVAL;

	/* this is the actual buffer to work with */
	args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args),
			GFP_KERNEL);
	if (args_buff == NULL)
		return -ENOMEM;

	status = copy_from_user(args_buff, cmd_from_user,
				args->buf_size_in_bytes - sizeof(*args));
	if (status != 0) {
		pr_debug("Failed to copy address watch user data\n");
		kfree(args_buff);
		return -EINVAL;
	}

	aw_info.process = p;

	aw_info.num_watch_points = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(aw_info.num_watch_points);

	aw_info.watch_mode = (enum HSA_DBG_WATCH_MODE *) &args_buff[args_idx];
	args_idx += sizeof(enum HSA_DBG_WATCH_MODE) * aw_info.num_watch_points;

	/*
	 * set watch address base pointer to point on the array base
	 * within args_buff
	 */
	aw_info.watch_address = (uint64_t *) &args_buff[args_idx];

	/* skip over the addresses buffer */
	args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		kfree(args_buff);
		return -EINVAL;
	}

	watch_mask_value = (uint64_t) args_buff[args_idx];

	if (watch_mask_value > 0) {
		/*
		 * There is an array of masks.
		 * set watch mask base pointer to point on the array base
		 * within args_buff
		 */
		aw_info.watch_mask = (uint64_t *) &args_buff[args_idx];

		/* skip over the masks buffer */
		args_idx += sizeof(aw_info.watch_mask) *
				aw_info.num_watch_points;
	} else {
		/* just the NULL mask, set to NULL and skip over it */
		aw_info.watch_mask = NULL;
		args_idx += sizeof(aw_info.watch_mask);
	}

	if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
		kfree(args_buff);
		return -EINVAL;
	}

	/* Currently HSA Event is not supported for DBG */
	aw_info.watch_event = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	status = kfd_dbgmgr_address_watch(dev->dbgmgr, &aw_info);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}
/* Parse and generate fixed size data structure for wave control */
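/*
 * The compact payload expected after struct kfd_ioctl_dbg_wave_control_args
 * in the user buffer, matching the computed_buff_size arithmetic and the
 * parsing order below (editorial diagram derived from this function):
 *
 *	operand  (enum HSA_DBG_WAVEOP)
 *	mode     (enum HSA_DBG_WAVEMODE)
 *	trapId   (uint32_t)
 *	DbgWaveMsg.WaveMsgInfoGen2.Value (uint32_t)
 *	MemoryVA (pointer-sized; always forced to NULL on the kernel side)
 */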
static int kfd_ioctl_dbg_wave_control(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_dbg_wave_control_args *args = data;
	struct kfd_dev *dev;
	struct dbg_wave_control_info wac_info;
	unsigned char *args_buff;
	uint32_t computed_buff_size;
	long status;
	void __user *cmd_from_user;
	unsigned int args_idx = 0;

	memset((void *) &wac_info, 0, sizeof(struct dbg_wave_control_info));

	/* we use compact form, independent of the packing attribute value */
	computed_buff_size = sizeof(*args) +
				sizeof(wac_info.mode) +
				sizeof(wac_info.operand) +
				sizeof(wac_info.dbgWave_msg.DbgWaveMsg) +
				sizeof(wac_info.dbgWave_msg.MemoryVA) +
				sizeof(wac_info.trapId);

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	if (dev->device_info->asic_family == CHIP_CARRIZO) {
		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
		return -EINVAL;
	}

	/* input size must match the computed "compact" size */
	if (args->buf_size_in_bytes != computed_buff_size) {
		pr_debug("size mismatch, computed : actual %u : %u\n",
				args->buf_size_in_bytes, computed_buff_size);
		return -EINVAL;
	}

	cmd_from_user = (void __user *) args->content_ptr;

	if (cmd_from_user == NULL)
		return -EINVAL;

	/* this is the actual buffer to work with */
	args_buff = kmalloc(args->buf_size_in_bytes - sizeof(*args),
			GFP_KERNEL);
	if (args_buff == NULL)
		return -ENOMEM;

	/* Now copy the entire buffer from user */
	status = copy_from_user(args_buff, cmd_from_user,
			args->buf_size_in_bytes - sizeof(*args));
	if (status != 0) {
		pr_debug("Failed to copy wave control user data\n");
		kfree(args_buff);
		return -EINVAL;
	}

	/* move ptr to the start of the "pay-load" area */
	wac_info.process = p;

	wac_info.operand = *((enum HSA_DBG_WAVEOP *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.operand);

	wac_info.mode = *((enum HSA_DBG_WAVEMODE *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.mode);

	wac_info.trapId = *((uint32_t *)(&args_buff[args_idx]));
	args_idx += sizeof(wac_info.trapId);

	wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value =
					*((uint32_t *)(&args_buff[args_idx]));
	wac_info.dbgWave_msg.MemoryVA = NULL;

	mutex_lock(kfd_get_dbgmgr_mutex());

	pr_debug("Calling dbg manager process %p, operand %u, mode %u, trapId %u, message %u\n",
			wac_info.process, wac_info.operand,
			wac_info.mode, wac_info.trapId,
			wac_info.dbgWave_msg.DbgWaveMsg.WaveMsgInfoGen2.Value);

	status = kfd_dbgmgr_wave_control(dev->dbgmgr, &wac_info);

	pr_debug("Returned status of dbg manager is %ld\n", status);

	mutex_unlock(kfd_get_dbgmgr_mutex());

	kfree(args_buff);

	return status;
}
static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;
	struct timespec64 time;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	/* Reading GPU clock counter from KGD */
	args->gpu_clock_counter =
		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);

	/* No access to rdtsc. Using raw monotonic time */
	getrawmonotonic64(&time);
	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);

	get_monotonic_boottime64(&time);
	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/* if the process-device list isn't empty */
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;
		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
				(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}
static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_event_args *args = data;
	int err;

	err = kfd_event_create(filp, p, args->event_type,
				args->auto_reset != 0, args->node_id,
				&args->event_id, &args->event_trigger_data,
				&args->event_page_offset,
				&args->event_slot_index);

	return err;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_destroy_event_args *args = data;

	return kfd_event_destroy(p, args->event_id);
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_set_event_args *args = data;

	return kfd_set_event(p, args->event_id);
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_reset_event_args *args = data;

	return kfd_reset_event(p, args->event_id);
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
				void *data)
{
	struct kfd_ioctl_wait_events_args *args = data;
	enum kfd_event_wait_result wait_result;
	int err;

	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &wait_result);

	args->wait_result = wait_result;

	return err;
}
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_REGISTER,
			kfd_ioctl_dbg_register, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
			kfd_ioctl_dbg_unregister, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
			kfd_ioctl_dbg_address_watch, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_WAVE_CONTROL,
			kfd_ioctl_dbg_wave_control, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
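/*
 * Illustrative sketch (editorial, not part of this driver): userspace
 * exercises the table above with plain ioctl(2) on /dev/kfd, using the
 * command macros from uapi/linux/kfd_ioctl.h. Error handling is elided:
 *
 *	int kfd_fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args ver = {0};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_GET_VERSION, &ver);
 *
 * Because the command encodes an output direction, kfd_ioctl() below
 * copies the args struct back out, so ver.major_version and
 * ver.minor_version are filled in on return.
 */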
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}
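/*
 * The mmap offset doubles as a routing tag: kfd_ioctl_create_queue()
 * above encodes KFD_MMAP_DOORBELL_MASK into the upper bits of the page
 * offset (and the event code paths encode KFD_MMAP_EVENTS_MASK), so a
 * single /dev/kfd mmap entry point can dispatch to the right backend.
 * A rough sketch of the decode performed below:
 *
 *	if (pgoff carries the doorbell tag) -> clear tag, kfd_doorbell_mmap()
 *	else if (pgoff carries the event tag) -> clear tag, kfd_event_mmap()
 *	else -> -EFAULT
 */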
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
			KFD_MMAP_DOORBELL_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
		return kfd_doorbell_mmap(process, vma);
	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
			KFD_MMAP_EVENTS_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
		return kfd_event_mmap(process, vma);
	}

	return -EFAULT;
}