/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"
#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies which GPU the offset belongs to.
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses an offset in pages. Hence, these
 *  defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)

#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
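
/* Illustrative sketch (not part of the driver ABI): how such an mmap offset
 * is composed and later decoded. The gpu_id value 0x1234 is hypothetical and
 * only shows the macro arithmetic; the real producers and consumers live in
 * kfd_chardev.c.
 *
 *	uint64_t off = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(0x1234);
 *	// vm_pgoff carries this value in page units, so it can be decoded:
 *	uint64_t type   = off & KFD_MMAP_TYPE_MASK;   // KFD_MMAP_TYPE_DOORBELL
 *	uint32_t gpu_id = KFD_MMAP_GPU_ID_GET(off);   // 0x1234
 *	uint64_t value  = KFD_MMAP_OFFSET_VALUE_GET(off);
 */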
/*
 * When working with the cp scheduler, the HIQ must be assigned to a fixed
 * hqd slot, either manually or via the amdgpu driver. These are the fixed
 * HIQ hqd slot definitions for Kaveri. On Kaveri only the first ME's queues
 * participate in cp scheduling, so with that in mind the HIQ slot is placed
 * in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
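
/* Example use (a sketch; "pdd" is just an illustrative local variable). The
 * macro infers both the allocation size and the cast from the pointer itself,
 * so the structure type only has to be spelled once:
 *
 *	struct kfd_process_device *pdd;
 *
 *	pdd = kfd_alloc_struct(pdd);
 *	if (!pdd)
 *		return -ENOMEM;
 */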
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
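
/* Layout sketch: the per-process-per-device trap buffer is expected to be a
 * single KFD_CWSR_TBA_TMA_SIZE allocation with the TMA starting
 * KFD_CWSR_TMA_OFFSET bytes after the TBA, i.e. roughly:
 *
 *	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
 *
 * (qpd fields as in struct qcm_process_device below; shown only to make the
 * two-page split concrete.)
 */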
/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048
/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter to simulate a large-BAR machine on machines that
 * do not have large BAR enabled
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/*
 * Set sh_mem_config.retry_disable on Vega10
 */
extern int noretry;

/*
 * Halt if HWS hang is detected
 */
extern int halt_if_hws_hang;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	unsigned int num_sdma_engines;
	unsigned int num_sdma_queues_per_engine;
};
struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};
struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;
};
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 *						running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 *						a specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is non active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue means
 * that the queue can access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * if it's a user mode or kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues*/
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	/* Relevant for CU */
	uint32_t cu_mask_count; /* Must be a multiple of 32 */
	uint32_t *cu_mask;
};
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_dev *device;
};
/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};
struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};
/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
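
/* Sketch of the handle round trip (hypothetical values, shown only to make
 * the bit layout concrete):
 *
 *	uint64_t handle = MAKE_HANDLE(0xabcd1234, 7);
 *	// GET_GPU_ID(handle)     == 0xabcd1234
 *	// GET_IDR_HANDLE(handle) == 7
 */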
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/*Apertures*/
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *vm;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of kfd_process structure */
	struct rcu_head rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/*Is the user space process 32 bit?*/
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and new one will be created
	 * during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};

bool kfd_dev_is_large_bar(struct kfd_dev *dev);
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;
/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_ms);
/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
void pm_release_ib(struct packet_manager *pm);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
				struct scheduling_resources *res);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif