/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
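
/*
 * KGD describes the compute resources it leaves to KFD through
 * shared_resources: queue_bitmap holds one bit per hardware queue slot
 * (HQD), grouped by pipe and MEC, and a set bit marks a slot KFD is
 * allowed to use. The helpers below are thin accessors over that
 * structure.
 */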
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
				dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
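
/*
 * allocate_vmid() claims the first free bit in dqm->vmid_bitmap, maps
 * the process' PASID to the resulting VMID and programs the process'
 * SH_MEM aperture settings. It is only called for the first queue a
 * process creates on this device; later queues reuse qpd->vmid.
 */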
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
				dqm->dev->vm_info.vmid_num_kfd);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
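
/*
 * Queue creation without the CP scheduler (KFD_SCHED_POLICY_NO_HWS):
 * the driver picks a hardware slot and loads the MQD into it directly,
 * rather than handing a runlist to the CP firmware.
 */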
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
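
/*
 * allocate_hqd() walks the pipes round-robin, starting at
 * next_pipe_to_allocate, and claims the first free queue slot found.
 * Advancing the starting pipe after each allocation spreads queues
 * horizontally across pipes.
 */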
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
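
/*
 * Creates a compute queue on the hardware: allocate an HQD slot,
 * initialize an MQD for the queue, program the scratch backing VA for
 * the VMID, then load the MQD into the allocated slot. Failures unwind
 * in reverse order.
 */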
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			       q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}
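
/*
 * destroy_queue_nocpsch() preempts the queue with a wavefront reset,
 * releases its HQD or SDMA slot and its MQD, and gives back the VMID
 * once the process' last queue is gone.
 */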
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
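
/*
 * update_queue() rewrites the MQD from q->properties and adjusts
 * dqm->queue_count if the update changed the queue's active state;
 * under HWS the runlist is then re-executed so the CP sees the change.
 */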
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active) && (!prev_active))
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0; i < get_pipes_per_mec(dqm); i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
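
/*
 * initialize_nocpsch() seeds the per-pipe allocation masks from the
 * queue_bitmap provided by KGD, so allocate_hqd() only hands out slots
 * KFD actually owns, and marks all KFD VMIDs and SDMA queues as free.
 */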
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
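
/*
 * SDMA queues are tracked in a bitmap of CIK_SDMA_QUEUES bits. The bit
 * index doubles as the SDMA queue id, from which the per-engine queue
 * id and the engine id are derived in create_sdma_queue_nocpsch().
 */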
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */
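
/*
 * set_sched_resources() reports to the CP firmware which VMIDs and
 * queue slots it may schedule on behalf of KFD. Only queues on the
 * first MEC are handed over, and queue_mask is a 64-bit field, hence
 * the WARN_ON for queue indices beyond 64.
 */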
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask) * 8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
		res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
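
/*
 * In the HWS path the dqm keeps no per-pipe bookkeeping; start_cpsch()
 * below brings up the packet manager, hands the scheduling resources to
 * the CP and allocates a fence in GART memory that is used to confirm
 * preemption completion.
 */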
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval)
		mutex_destroy(&dqm->lock);

	return retval;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	destroy_queues_cpsch(dqm, true, true);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	/* here we actually preempt the DIQ */
	destroy_queues_cpsch(dqm, true, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
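
/*
 * amdkfd_fence_wait_timeout() polls fence_addr (yielding the CPU via
 * schedule()) until the CP writes fence_value there, or returns -ETIME
 * once timeout_ms has elapsed.
 */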
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}
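
/*
 * Preemption under HWS: unmap packets are sent for the SDMA engines and
 * the compute queues, then a fence write is requested and waited on to
 * confirm the preemption actually finished. On timeout the process is
 * flagged for a wavefront reset, as the CP state is no longer reliable.
 */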
static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock)
{
	int retval;
	enum kfd_preempt_type_filter preempt_type;
	struct kfd_process_device *pdd;

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	preempt_type = preempt_static_queues ?
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			preempt_type, 0, false, 0);
	if (retval)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval) {
		pdd = kfd_get_process_device_data(dqm->dev,
				kfd_get_process(current));
		pdd->reset_wavefronts = true;
		goto out;
	}
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
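
/*
 * execute_queues_cpsch() rebuilds the runlist: it first preempts the
 * dynamic queues, then, if any process still has active queues, sends a
 * fresh runlist to the CP and marks it active.
 */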
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false, false);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
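
/*
 * device_queue_manager_init() wires dqm->ops to either the cpsch (HWS)
 * or nocpsch implementation based on the scheduling policy, then hooks
 * up the ASIC-specific ops for the detected family.
 */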
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}