/* 2017-09-12 14:29:07 -04:00 */
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
# ifndef __AMDGPU_MN_H__
# define __AMDGPU_MN_H__
/* 2019-07-02 18:39:45 -04:00 */
# include <linux/types.h>
# include <linux/hmm.h>
# include <linux/rwsem.h>
# include <linux/workqueue.h>
# include <linux/interval_tree.h>
/* 2017-09-12 14:29:07 -04:00 */
/* 2018-03-23 15:32:28 -04:00 */
/* Which MMU-notifier flavour an amdgpu_mn instance serves (see struct amdgpu_mn). */
enum amdgpu_mn_type {
	AMDGPU_MN_TYPE_GFX,
	AMDGPU_MN_TYPE_HSA,
};
/* 2019-07-02 18:39:45 -04:00 */
/**
 * struct amdgpu_mn
 *
 * @adev: amdgpu device pointer
 * @mm: process address space
 * @type: type of MMU notifier
 * @work: destruction work item
 * @node: hash table node to find structure by adev and mn
 * @lock: rw semaphore protecting the notifier nodes
 * @objects: interval tree containing amdgpu_mn_nodes
 * @mirror: HMM mirror function support (only present with CONFIG_HMM_MIRROR)
 *
 * Data for each amdgpu device and process address space.
 */
struct amdgpu_mn {
	/* constant after initialisation */
	struct amdgpu_device	*adev;
	struct mm_struct	*mm;
	enum amdgpu_mn_type	type;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by adev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct rw_semaphore	lock;
	struct rb_root_cached	objects;

#ifdef CONFIG_HMM_MIRROR
	/* HMM mirror */
	struct hmm_mirror	mirror;
#endif
};
/* 2018-07-23 17:45:46 -04:00 */
# if defined(CONFIG_HMM_MIRROR)
/* 2017-09-12 14:29:07 -04:00 */
/* Take/drop the notifier read-side lock around command submission. */
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);

/* Look up (or create) the notifier for @adev and the current process. */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
				enum amdgpu_mn_type type);

/* Track/untrack a userptr BO in the per-process interval tree. */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
/*
 * drm/amdgpu: replace get_user_pages with HMM mirror helpers
 *
 * Use HMM helper function hmm_vma_fault() to get physical pages backing
 * userptr and start CPU page table update track of those pages.  Then use
 * hmm_vma_range_done() to check if those pages are updated before
 * amdgpu_cs_submit for gfx or before user queues are resumed for kfd.
 *
 * If userptr pages are updated, for gfx, amdgpu_cs_ioctl will restart
 * from scratch, for kfd, restore worker is rescheduled to retry.
 *
 * HMM simplifies the CPU page table concurrent update check, so remove
 * guptasklock, mmu_invalidations, last_set_pages fields from
 * amdgpu_ttm_tt struct.
 *
 * HMM does not pin the page (increase page ref count), so remove related
 * operations like release_pages(), put_page(), mark_page_dirty().
 *
 * Signed-off-by: Philip Yang <Philip.Yang@amd.com>
 * Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
 * Reviewed-by: Christian König <christian.koenig@amd.com>
 * Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
 * 2018-12-13 15:35:28 -05:00
 */
/* Initialise an HMM range structure before mirroring userptr pages. */
void amdgpu_hmm_init_range(struct hmm_range *range);
/* 2017-09-12 14:29:07 -04:00 */
# else
/* Stub: nothing to lock when HMM_MIRROR is not configured. */
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
}
/* Stub: nothing to unlock when HMM_MIRROR is not configured. */
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
}
/* 2018-03-23 15:32:28 -04:00 */
/* Stub: no notifier exists without HMM_MIRROR support. */
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
					      enum amdgpu_mn_type type)
{
	return NULL;
}
/*
 * Stub: userptr registration cannot work without HMM_MIRROR; warn once
 * and report the feature as unsupported to the caller.
 *
 * Fix: the extraction garbled the warning string literal (spaces injected
 * inside the quotes and around the \n escape); restored to the upstream
 * user-visible message.
 */
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
		      "add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
	return -ENODEV;
}
/* Stub: nothing was registered, so nothing to unregister. */
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
}
# endif
# endif