/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
#include "amdgpu.h"
#include <drm/gpu_scheduler.h>

/*
 * GDS (global data share), GWS (global wave sync) and OA (ordered append)
 * are on-chip GPU resources managed as private TTM placements on top of
 * the standard VRAM/TT/SYSTEM domains.
 */
#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)

#define AMDGPU_PL_FLAG_GDS	(TTM_PL_FLAG_PRIV << 0)
#define AMDGPU_PL_FLAG_GWS	(TTM_PL_FLAG_PRIV << 1)
#define AMDGPU_PL_FLAG_OA	(TTM_PL_FLAG_PRIV << 2)

/* Size (in pages) and number of the GART windows used for buffer moves */
#define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2

struct amdgpu_mman {
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
	/* CPU mapping of the CPU-visible part of VRAM */
	void __iomem			*aper_base_kaddr;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_entries[8];
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
	bool					buffer_funcs_enabled;

	/* protects the GART windows used for buffer moves */
	struct mutex				gtt_window_lock;
	/* Scheduler entity for buffer moves */
	struct drm_sched_entity			entity;
};

/* Describes one end of a copy: a BO, its backing memory and a byte offset */
struct amdgpu_copy_mem {
	struct ttm_buffer_object	*bo;
	struct ttm_mem_reg		*mem;
	unsigned long			offset;
};

extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;

bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);

u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
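
/*
 * Illustrative sketch only (not a declaration in this header): the type
 * managers are embedded in adev->mman.bdev, so current usage can be
 * queried along these lines, assuming "adev" is an initialized
 * amdgpu_device, e.g. from the INFO ioctl or debugfs:
 *
 *	u64 vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 *	u64 vis  = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 *	u64 gtt  = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
 */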

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_late_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);
void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
					bool enable);

int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct reservation_object *resv,
		       struct dma_fence **fence, bool direct_submit,
		       bool vm_needs_flush);
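
/*
 * Illustrative sketch only: a fire-and-wait copy between two addresses,
 * assuming both offsets are already GART mapped and "ring" is
 * adev->mman.buffer_funcs_ring:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_copy_buffer(ring, src_offset, dst_offset, byte_count,
 *			       NULL, &fence, false, false);
 *	if (!r) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */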

int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct amdgpu_copy_mem *src,
			       struct amdgpu_copy_mem *dst,
			       uint64_t size,
			       struct reservation_object *resv,
			       struct dma_fence **f);
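
/*
 * Illustrative sketch only: unlike amdgpu_copy_buffer(), this variant takes
 * (BO, memory, offset) triples and can go through the GART transfer windows
 * for memory without a GART address. "sbo" and "dbo" are assumed to be
 * reserved amdgpu_bo pointers:
 *
 *	struct amdgpu_copy_mem src = {
 *		.bo = &sbo->tbo, .mem = &sbo->tbo.mem, .offset = 0,
 *	};
 *	struct amdgpu_copy_mem dst = {
 *		.bo = &dbo->tbo, .mem = &dbo->tbo.mem, .offset = 0,
 *	};
 *	struct dma_fence *f = NULL;
 *	int r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, resv, &f);
 */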

int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence);
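
/*
 * Illustrative sketch only: clearing a freshly created BO to zero, roughly
 * what AMDGPU_GEM_CREATE_VRAM_CLEARED does; bo->tbo.resv is assumed to be
 * held and amdgpu_bo_fence() comes from amdgpu_object.h:
 *
 *	struct dma_fence *fence = NULL;
 *
 *	r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */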

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);

int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated);
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
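
/*
 * Illustrative sketch only: how an MMU-notifier invalidation path might
 * combine these helpers, skipping BOs whose user pages do not overlap the
 * invalidated range ("bo", "start" and "end" are assumed to come from the
 * notifier callback):
 *
 *	if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 *		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
 */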

uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);

#endif