/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */
#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

/* Upper bound on HW queues exposed to KFD, sized for queue bitmaps below. */
#define KGD_MAX_QUEUES	128

struct kfd_dev;
struct kgd_dev;
struct kgd_mem;
/* How a wavefront is removed from HW when a queue is preempted:
 * drained (allowed to finish) or reset (killed in place).
 */
enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
/* Snapshot of a GPU VM fault: faulting page address, the VMID and memory
 * controller client that faulted, raw fault status, and decoded protection
 * bits of the failed access.
 */
struct kfd_vm_fault_info {
	uint64_t	page_addr;
	uint32_t	vmid;
	uint32_t	mc_id;
	uint32_t	status;
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};
/* Compute-unit topology and capacity information reported to KFD. */
struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	/* Per SE/SH bitmap of active CUs; 4x4 covers the max SE/SH count. */
	uint32_t cu_bitmap[4][4];
};
/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;	/* VRAM not visible to the CPU */
	uint64_t local_mem_size_public;		/* CPU-visible VRAM */
	uint32_t vram_width;
	uint32_t mem_clk_max;
};
/* Memory pools a KFD allocation can come from. Values start at 1 so that
 * 0 never names a valid pool.
 */
enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};
/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * The HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the user mode queues list that are currently running.
 * The DIQ queue is a debugging queue that dispatches debugging commands to
 * the firmware.
 * In this scheduling mode the user mode queues over-subscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * over-subscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
 * sets the command processor registers and sets the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
2014-07-15 13:53:32 +03:00
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap ;
2017-02-03 16:28:48 -05:00
/* number of pipes per mec */
uint32_t num_pipe_per_mec ;
/* number of queues per pipe */
uint32_t num_queue_per_pipe ;
/* Bit n == 1 means Queue n is available for KFD */
DECLARE_BITMAP ( queue_bitmap , KGD_MAX_QUEUES ) ;
2014-07-15 13:53:32 +03:00
2019-01-09 23:31:14 -05:00
/* SDMA doorbell assignments (SOC15 and later chips only). Only
2018-04-10 17:33:02 -04:00
* specific doorbells are routed to each SDMA engine . Others
* are routed to IH and VCN . They are not usable by the CP .
*/
2019-01-09 23:31:14 -05:00
uint32_t * sdma_doorbell_idx ;
2019-02-13 13:15:05 -05:00
/* From SOC15 onward, the doorbell index range not usable for CP
* queues .
*/
uint32_t non_cp_doorbells_start ;
uint32_t non_cp_doorbells_end ;
2018-04-10 17:33:02 -04:00
2014-07-15 13:53:32 +03:00
/* Base address of doorbell aperture. */
phys_addr_t doorbell_physical_address ;
/* Size in bytes of doorbell aperture. */
size_t doorbell_aperture_size ;
/* Number of bytes at start of aperture reserved for KGD. */
size_t doorbell_start_offset ;
2018-02-06 20:32:36 -05:00
/* GPUVM address space size in bytes */
uint64_t gpuvm_size ;
/* Minor device number of the render node */
int drm_render_minor ;
2014-07-15 13:53:32 +03:00
} ;
/* GPU tiling-mode information returned by get_tile_config(). The two
 * pointer members reference register tables owned by the graphics driver;
 * the counts give the number of entries in each table.
 */
struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 *
 * Bit 31 uses an unsigned constant: left-shifting 1 into the sign bit of a
 * signed int is undefined behavior in C. The resulting 32-bit value is
 * unchanged.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1U << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */
2014-07-15 13:53:32 +03:00
/**
* struct kfd2kgd_calls
*
* @ program_sh_mem_settings : A function that should initiate the memory
* properties such as main aperture memory type ( cache / non cached ) and
* secondary aperture base address , size and memory type .
* This function is used only for no cp scheduling mode .
*
* @ set_pasid_vmid_mapping : Exposes pasid / vmid pair to the H / W for no cp
* scheduling mode . Only used for no cp scheduling mode .
*
* @ hqd_load : Loads the mqd structure to a H / W hqd slot . used only for no cp
* sceduling mode .
*
2015-01-03 22:12:29 +02:00
* @ hqd_sdma_load : Loads the SDMA mqd structure to a H / W SDMA hqd slot .
* used only for no HWS mode .
*
2017-11-27 18:29:48 -05:00
* @ hqd_dump : Dumps CPC HQD registers to an array of address - value pairs .
* Array is allocated with kmalloc , needs to be freed with kfree by caller .
*
* @ hqd_sdma_dump : Dumps SDMA HQD registers to an array of address - value pairs .
* Array is allocated with kmalloc , needs to be freed with kfree by caller .
*
2014-07-15 13:53:32 +03:00
* @ hqd_is_occupies : Checks if a hqd slot is occupied .
*
* @ hqd_destroy : Destructs and preempts the queue assigned to that hqd slot .
*
2015-01-03 22:12:29 +02:00
* @ hqd_sdma_is_occupied : Checks if an SDMA hqd slot is occupied .
*
* @ hqd_sdma_destroy : Destructs and preempts the SDMA queue assigned to that
* SDMA hqd slot .
*
2017-08-15 23:00:19 -04:00
* @ set_scratch_backing_va : Sets VA for scratch backing memory of a VMID .
* Only used for no cp scheduling mode
*
2017-08-15 23:00:21 -04:00
* @ get_tile_config : Returns GPU - specific tiling mode information
*
2018-02-06 20:32:38 -05:00
* @ set_vm_context_page_table_base : Program page table base for a VMID
*
* @ invalidate_tlbs : Invalidate TLBs for a specific PASID
*
* @ invalidate_tlbs_vmid : Invalidate TLBs for a specific VMID
*
2018-07-11 22:32:51 -04:00
* @ read_vmid_from_vmfault_reg : On Hawaii the VMID is not set in the
* IH ring entry . This function allows the KFD ISR to get the VMID
* from the fault status register as early as possible .
*
2018-07-06 11:26:08 -04:00
* @ get_hive_id : Returns hive id of current device , 0 if xgmi is not enabled
*
2014-07-15 13:53:32 +03:00
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver .
*
*/
struct kfd2kgd_calls {
/* Register access functions */
void ( * program_sh_mem_settings ) ( struct kgd_dev * kgd , uint32_t vmid ,
uint32_t sh_mem_config , uint32_t sh_mem_ape1_base ,
uint32_t sh_mem_ape1_limit , uint32_t sh_mem_bases ) ;
int ( * set_pasid_vmid_mapping ) ( struct kgd_dev * kgd , unsigned int pasid ,
unsigned int vmid ) ;
2015-03-05 15:13:18 +02:00
int ( * init_interrupts ) ( struct kgd_dev * kgd , uint32_t pipe_id ) ;
2014-07-15 13:53:32 +03:00
int ( * hqd_load ) ( struct kgd_dev * kgd , void * mqd , uint32_t pipe_id ,
2017-08-15 23:00:17 -04:00
uint32_t queue_id , uint32_t __user * wptr ,
uint32_t wptr_shift , uint32_t wptr_mask ,
struct mm_struct * mm ) ;
2014-07-15 13:53:32 +03:00
2019-12-25 15:50:51 +08:00
int ( * hiq_mqd_load ) ( struct kgd_dev * kgd , void * mqd ,
uint32_t pipe_id , uint32_t queue_id ,
uint32_t doorbell_off ) ;
2017-11-01 19:21:58 -04:00
int ( * hqd_sdma_load ) ( struct kgd_dev * kgd , void * mqd ,
uint32_t __user * wptr , struct mm_struct * mm ) ;
2015-01-03 22:12:29 +02:00
2017-11-27 18:29:48 -05:00
int ( * hqd_dump ) ( struct kgd_dev * kgd ,
uint32_t pipe_id , uint32_t queue_id ,
uint32_t ( * * dump ) [ 2 ] , uint32_t * n_regs ) ;
int ( * hqd_sdma_dump ) ( struct kgd_dev * kgd ,
uint32_t engine_id , uint32_t queue_id ,
uint32_t ( * * dump ) [ 2 ] , uint32_t * n_regs ) ;
2014-12-09 12:00:09 +02:00
bool ( * hqd_is_occupied ) ( struct kgd_dev * kgd , uint64_t queue_address ,
2014-07-15 13:53:32 +03:00
uint32_t pipe_id , uint32_t queue_id ) ;
2017-08-15 23:00:17 -04:00
int ( * hqd_destroy ) ( struct kgd_dev * kgd , void * mqd , uint32_t reset_type ,
2014-07-15 13:53:32 +03:00
unsigned int timeout , uint32_t pipe_id ,
uint32_t queue_id ) ;
2015-01-03 22:12:29 +02:00
bool ( * hqd_sdma_is_occupied ) ( struct kgd_dev * kgd , void * mqd ) ;
int ( * hqd_sdma_destroy ) ( struct kgd_dev * kgd , void * mqd ,
unsigned int timeout ) ;
2014-09-28 11:51:15 +03:00
int ( * address_watch_disable ) ( struct kgd_dev * kgd ) ;
int ( * address_watch_execute ) ( struct kgd_dev * kgd ,
unsigned int watch_point_id ,
uint32_t cntl_val ,
uint32_t addr_hi ,
uint32_t addr_lo ) ;
int ( * wave_control_execute ) ( struct kgd_dev * kgd ,
uint32_t gfx_index_val ,
uint32_t sq_cmd ) ;
uint32_t ( * address_watch_get_offset ) ( struct kgd_dev * kgd ,
unsigned int watch_point_id ,
unsigned int reg_offset ) ;
2019-09-25 23:57:30 -04:00
bool ( * get_atc_vmid_pasid_mapping_info ) (
2015-05-19 19:25:01 +03:00
struct kgd_dev * kgd ,
2019-09-25 23:57:30 -04:00
uint8_t vmid ,
uint16_t * p_pasid ) ;
2014-09-28 11:51:15 +03:00
2019-09-18 18:17:57 -04:00
/* No longer needed from GFXv9 onward. The scratch base address is
* passed to the shader by the CP . It ' s the user mode driver ' s
* responsibility .
*/
2017-08-15 23:00:19 -04:00
void ( * set_scratch_backing_va ) ( struct kgd_dev * kgd ,
uint64_t va , uint32_t vmid ) ;
2019-09-18 18:17:57 -04:00
2017-08-15 23:00:21 -04:00
int ( * get_tile_config ) ( struct kgd_dev * kgd , struct tile_config * config ) ;
2017-12-08 23:08:39 -05:00
2018-02-06 20:32:38 -05:00
void ( * set_vm_context_page_table_base ) ( struct kgd_dev * kgd ,
2018-03-13 17:44:09 -04:00
uint32_t vmid , uint64_t page_table_base ) ;
2018-07-11 22:32:51 -04:00
uint32_t ( * read_vmid_from_vmfault_reg ) ( struct kgd_dev * kgd ) ;
2018-07-06 11:26:08 -04:00
uint64_t ( * get_hive_id ) ( struct kgd_dev * kgd ) ;
2014-07-15 13:53:32 +03:00
} ;
# endif /* KGD_KFD_INTERFACE_H_INCLUDED */