/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
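/*
 * Look up the process_queue_node for @qid in @pqm's queue list. The id may
 * belong to either a user queue (pqn->q) or a kernel queue such as the DIQ
 * (pqn->kq). Returns NULL if the process owns no queue with that id.
 */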
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}
static int find_available_queue_slot(struct process_queue_manager *pqm,
					unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
				pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}
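/*
 * Remove all of @pdd's queues from its device's queue manager. The
 * already_dequeued flag makes this idempotent, so it is safe to call more
 * than once per device.
 */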
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}
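/*
 * pqm_set_gws - attach or detach the device's GWS (global wave sync)
 * resource for the user queue identified by @qid. A non-NULL @gws maps the
 * GWS allocation into the process and records it on the queue; a NULL @gws
 * releases it. Only one queue per process may hold GWS at a time, and the
 * change is pushed down through the device queue manager.
 */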
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws)
{
	struct kfd_dev *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only allow one queue per process to have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (gws)
		ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
			gws, &mem);
	else
		ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
			pqn->q->gws);
	if (unlikely(ret))
		return ret;

	pqn->q->gws = mem;
	pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_dequeue_from_device(pdd);
}
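/*
 * Per-process queue bookkeeping: pqm->queues links one process_queue_node
 * per queue, and queue_slot_bitmap tracks which of the
 * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS queue ids are currently allocated.
 */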
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap =
			kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					BITS_PER_BYTE), GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}
void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws)
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	kfree(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}
static int init_user_queue(struct process_queue_manager *pqm,
				struct kfd_dev *dev, struct queue **q,
				struct queue_properties *q_properties,
				struct file *f, unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;

	/* let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	pr_debug("PQM After init queue");

	return retval;
}
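/*
 * pqm_create_queue - create a queue of the requested type on behalf of the
 * process. Compute and SDMA/SDMA_XGMI requests become user queues handed to
 * the device queue manager, while a DIQ request creates a kernel queue. On
 * success the new queue id is returned through *qid and, for user queues,
 * the doorbell byte offset within the process doorbell slice is returned
 * through *p_doorbell_offset_in_process.
 */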
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_dev *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	q = NULL;
	kq = NULL;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug flag
	 * is not set yet, hence we also check the type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->device_info->max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;

	retval = find_available_queue_slot(pqm, qid);
	if (retval != 0)
		return retval;

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}

	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
		print_queue(q);
		break;

	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over-subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		(dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}

	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
			pqm->process->pasid, type, retval);
		goto err_create_queue;
	}

	if (q && p_doorbell_offset_in_process)
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * There are always 1024 doorbells per process, so in case
		 * of 8-byte doorbells, there are two doorbell pages per
		 * process.
		 */
		*p_doorbell_offset_in_process =
			(q->properties.doorbell_off * sizeof(uint32_t)) &
			(kfd_doorbell_process_slice(dev) - 1);
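	/*
	 * Illustrative example: assuming 4 KiB pages and 4-byte doorbells,
	 * the process doorbell slice spans 1024 * 4 = 4 KiB, so a
	 * doorbell_off of 0x10 dwords is reported to user mode as byte
	 * offset 0x10 * 4 = 0x40 within that slice.
	 */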
	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;

err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* if the queue lists are empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);

	return retval;
}
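/*
 * pqm_destroy_queue - tear down the queue identified by @qid: destroy it in
 * the device queue manager, release any GWS and CU-mask state attached to a
 * user queue, free the process_queue_node and return the queue slot to the
 * bitmap. If this was the process's last queue on the device, the process
 * is also unregistered from that device.
 */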
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_dev *dev;
	int retval;

	dqm = NULL;

	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}

	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}

	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
				pqm->process->pasid,
				pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		kfree(pqn->q->properties.cu_mask);
		pqn->q->properties.cu_mask = NULL;
		uninit_queue(pqn->q);
	}

	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}
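/*
 * pqm_update_queue - copy the user-controllable ring properties (address,
 * size, percentage and priority) from @p into the queue identified by @qid
 * and push the update down to the device queue manager.
 */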
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
	if (retval != 0)
		return retval;

	return 0;
}
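/*
 * pqm_set_cu_mask - replace the queue's compute-unit mask with the one
 * carried in @p. The queue takes ownership of the mask buffer allocated by
 * the caller; the previous mask, if any, is freed here.
 */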
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	/* Free the old CU mask memory if it is already allocated, then
	 * store the new CU mask provided in @p.
	 */
	kfree(pqn->q->properties.cu_mask);

	pqn->q->properties.cu_mask_count = p->cu_mask_count;
	pqn->q->properties.cu_mask = p->cu_mask;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
							pqn->q);
	if (retval != 0)
		return retval;

	return 0;
}
struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}
#if defined(CONFIG_DEBUG_FS)

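/*
 * pqm_debugfs_mqds - debugfs helper that walks the process's queue list and
 * dumps the MQD (memory queue descriptor) of every compute queue, SDMA
 * queue and DIQ through the matching mqd_manager.
 */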
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				break;
			default:
				seq_printf(m,
					   "Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
					   "Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
				   "Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
		if (r != 0)
			break;
	}

	return r;
}

#endif