/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

struct amdgpu_device;

struct kfd_bo_va_list {
	struct list_head bo_list;
	struct amdgpu_bo_va *bo_va;
	void *kgd_dev;
	bool is_mapped;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct list_head bo_va_list;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
						struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);

/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dmabuf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_sem.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->mm == NULL) {	\
				use_mm(mmptr);			\
				valid = !get_user((dst), (wptr)); \
				unuse_mm(mmptr);		\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
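
/*
 * Illustrative sketch only, not part of this interface: an hqd_load-style
 * callback might consume read_user_wptr() roughly as below. The variable
 * names and the MQD field are hypothetical placeholders; on failure the
 * previously programmed write pointer is simply left untouched.
 *
 *	uint32_t wptr_val = 0;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		mqd->hqd_pq_wptr = wptr_val;
 */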

/* GPUVM API */
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, unsigned int pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					struct file *filp, unsigned int pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);

void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);

int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				  struct tile_config *config);
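
/*
 * Illustrative flow only, not part of this interface: a KFD-side caller
 * would typically pair the GPUVM entry points roughly as below. The local
 * names, the alloc_flags value and the error handling are placeholders.
 *
 *	struct kgd_mem *mem;
 *	uint64_t mmap_offset;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
 *						    &mmap_offset, alloc_flags);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *
 *	(use the buffer, then tear down in the reverse order:)
 *
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, NULL);
 */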

/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);

#endif /* AMDGPU_AMDKFD_H_INCLUDED */