/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>

#include <drm/drmP.h>
#include <drm/drm.h>

#include "amdgpu.h"
struct amdgpu_mn {
        /* constant after initialisation */
        struct amdgpu_device    *adev;
        struct mm_struct        *mm;
        struct mmu_notifier     mn;

        /* only used on destruction */
        struct work_struct      work;

        /* protected by adev->mn_lock */
        struct hlist_node       node;

        /* objects protected by lock */
        struct rw_semaphore     lock;
        struct rb_root_cached   objects;
        struct mutex            read_lock;
        atomic_t                recursion;
};

struct amdgpu_mn_node {
        struct interval_tree_node       it;
        struct list_head                bos;
};
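
/*
 * Each amdgpu_mn_node covers one interval in rmn->objects.  BOs whose
 * userptr ranges overlap are merged into a single node and chained on
 * node->bos (see amdgpu_mn_register()), so an invalidation needs only
 * one interval tree walk to find every affected BO.
 */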
/**
 * amdgpu_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void amdgpu_mn_destroy(struct work_struct *work)
{
        struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
        struct amdgpu_device *adev = rmn->adev;
        struct amdgpu_mn_node *node, *next_node;
        struct amdgpu_bo *bo, *next_bo;

        mutex_lock(&adev->mn_lock);
        down_write(&rmn->lock);
        hash_del(&rmn->node);
        rbtree_postorder_for_each_entry_safe(node, next_node,
                                             &rmn->objects.rb_root, it.rb) {
                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
                        bo->mn = NULL;
                        list_del_init(&bo->mn_list);
                }
                kfree(node);
        }
        up_write(&rmn->lock);
        mutex_unlock(&adev->mn_lock);
        mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
        kfree(rmn);
}
/**
 * amdgpu_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void amdgpu_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

        INIT_WORK(&rmn->work, amdgpu_mn_destroy);
        schedule_work(&rmn->work);
}
/**
 * amdgpu_mn_lock - take the write side lock for this mn
 *
 * @mn: our notifier
 */
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
        if (mn)
                down_write(&mn->lock);
}
/**
 * amdgpu_mn_unlock - drop the write side lock for this mn
 *
 * @mn: our notifier
 */
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
        if (mn)
                up_write(&mn->lock);
}
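
/*
 * Usage sketch: callers that must not race with an invalidation (the
 * command submission path, for example) are expected to bracket their
 * critical section with the write side of the lock.  The parser field
 * name below is only an assumption for illustration:
 *
 *      amdgpu_mn_lock(p->mn);
 *      ... validate the user pages and push the job ...
 *      amdgpu_mn_unlock(p->mn);
 *
 * While the write side is held, amdgpu_mn_read_lock() blocks in the
 * invalidation callbacks, so user pages cannot change under the job.
 */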
/**
 * amdgpu_mn_read_lock - take the rmn read lock
 *
 * @rmn: our notifier
 *
 * Take the rmn read side lock.
 */
static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
{
        mutex_lock(&rmn->read_lock);
        if (atomic_inc_return(&rmn->recursion) == 1)
                down_read_non_owner(&rmn->lock);
        mutex_unlock(&rmn->read_lock);
}
/**
 * amdgpu_mn_read_unlock - drop the rmn read lock
 *
 * @rmn: our notifier
 *
 * Drop the rmn read side lock.
 */
static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
{
        if (atomic_dec_return(&rmn->recursion) == 0)
                up_read_non_owner(&rmn->lock);
}
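
/*
 * Note on the read side: invalidate_range_start/end pairs can overlap
 * for the same mm, so rmn->recursion counts how many invalidations are
 * in flight.  Only the first amdgpu_mn_read_lock() takes rmn->lock and
 * only the last amdgpu_mn_read_unlock() releases it; since the thread
 * dropping the semaphore is not necessarily the one that acquired it,
 * the *_non_owner variants are used.
 */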
/**
 * amdgpu_mn_invalidate_node - unmap all BOs of a node
 *
 * @node: the node with the BOs to unmap
 * @start: start of the affected address range
 * @end: end of the affected address range
 *
 * We block for all BOs and unmap them by moving them
 * into system domain again.
 */
static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
                                      unsigned long start,
                                      unsigned long end)
{
        struct amdgpu_bo *bo;
        long r;

        list_for_each_entry(bo, &node->bos, mn_list) {

                if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
                        continue;

                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                        true, false, MAX_SCHEDULE_TIMEOUT);
                if (r <= 0)
                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);

                amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
        }
}
/**
 * amdgpu_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs between start and end to be idle and
 * unmap them by moving them into system domain again.
 */
static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
        struct interval_tree_node *it;

        /* notification is exclusive, but interval is inclusive */
        end -= 1;

        amdgpu_mn_read_lock(rmn);

        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct amdgpu_mn_node *node;

                node = container_of(it, struct amdgpu_mn_node, it);
                it = interval_tree_iter_next(it, start, end);

                amdgpu_mn_invalidate_node(node, start, end);
        }
}
/**
 * amdgpu_mn_invalidate_range_end - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * Release the lock again to allow new command submissions.
 */
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
                                           struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);

        amdgpu_mn_read_unlock(rmn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
        .release = amdgpu_mn_release,
        .invalidate_range_start = amdgpu_mn_invalidate_range_start,
        .invalidate_range_end = amdgpu_mn_invalidate_range_end,
};
/**
 * amdgpu_mn_get - create notifier context
 *
 * @adev: amdgpu device pointer
 *
 * Creates a notifier context for current->mm.
 */
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
        struct mm_struct *mm = current->mm;
        struct amdgpu_mn *rmn;
        int r;

        mutex_lock(&adev->mn_lock);
        if (down_write_killable(&mm->mmap_sem)) {
                mutex_unlock(&adev->mn_lock);
                return ERR_PTR(-EINTR);
        }

        hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
                if (rmn->mm == mm)
                        goto release_locks;

        rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
        if (!rmn) {
                rmn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }

        rmn->adev = adev;
        rmn->mm = mm;
        rmn->mn.ops = &amdgpu_mn_ops;
        init_rwsem(&rmn->lock);
        rmn->objects = RB_ROOT_CACHED;
        mutex_init(&rmn->read_lock);
        atomic_set(&rmn->recursion, 0);

        r = __mmu_notifier_register(&rmn->mn, mm);
        if (r)
                goto free_rmn;

        hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);

        return rmn;

free_rmn:
        up_write(&mm->mmap_sem);
        mutex_unlock(&adev->mn_lock);
        kfree(rmn);

        return ERR_PTR(r);
}
/**
 * amdgpu_mn_register - register a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
        unsigned long end = addr + amdgpu_bo_size(bo) - 1;
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_mn *rmn;
        struct amdgpu_mn_node *node = NULL;
        struct list_head bos;
        struct interval_tree_node *it;

        rmn = amdgpu_mn_get(adev);
        if (IS_ERR(rmn))
                return PTR_ERR(rmn);

        INIT_LIST_HEAD(&bos);

        down_write(&rmn->lock);

        while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
                kfree(node);
                node = container_of(it, struct amdgpu_mn_node, it);
                interval_tree_remove(&node->it, &rmn->objects);
                addr = min(it->start, addr);
                end = max(it->last, end);
                list_splice(&node->bos, &bos);
        }

        if (!node) {
                node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
                if (!node) {
                        up_write(&rmn->lock);
                        return -ENOMEM;
                }
        }

        bo->mn = rmn;

        node->it.start = addr;
        node->it.last = end;
        INIT_LIST_HEAD(&node->bos);
        list_splice(&bos, &node->bos);
        list_add(&bo->mn_list, &node->bos);

        interval_tree_insert(&node->it, &rmn->objects);

        up_write(&rmn->lock);

        return 0;
}
/**
 * amdgpu_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: amdgpu buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void amdgpu_mn_unregister(struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_mn *rmn;
        struct list_head *head;

        mutex_lock(&adev->mn_lock);

        rmn = bo->mn;
        if (rmn == NULL) {
                mutex_unlock(&adev->mn_lock);
                return;
        }

        down_write(&rmn->lock);

        /* save the next list entry for later */
        head = bo->mn_list.next;

        bo->mn = NULL;
        list_del_init(&bo->mn_list);

        if (list_empty(head)) {
                struct amdgpu_mn_node *node;

                node = container_of(head, struct amdgpu_mn_node, bos);
                interval_tree_remove(&node->it, &rmn->objects);
                kfree(node);
        }

        up_write(&rmn->lock);
        mutex_unlock(&adev->mn_lock);
}
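
/*
 * Usage sketch: a userptr BO is registered once its CPU address is known
 * and unregistered before it is freed.  The ioctl argument name below is
 * an assumption for illustration only:
 *
 *      r = amdgpu_mn_register(bo, args->addr);
 *      if (r)
 *              goto release_object;
 *      ...
 *      amdgpu_mn_unregister(bo);
 *
 * Registrations from the same process that overlap an existing interval
 * are merged into a single amdgpu_mn_node by amdgpu_mn_register().
 */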