// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Default disabled for now until it has some more testing on the different
 * iommu combinations that can be paired with the driver:
 */
bool enable_eviction = false;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

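/* Eviction to swap is only worth attempting when the opt-in module param is
 * set and the system actually has free swap pages available.
 */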
static bool can_swap(void)
{
	return enable_eviction && get_nr_swap_pages() > 0;
}

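/* Report to the core shrinker how many pages we could potentially reclaim:
 * purgeable pages, plus evictable pages when swapping them out is possible.
 */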
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned count = priv->shrinkable_count;

	if (can_swap())
		count += priv->evictable_count;

	return count;
}

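/* Per-object callback for scan(): free the backing pages of an object that
 * userspace has marked as purgeable.
 */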
static bool
purge(struct msm_gem_object *msm_obj)
{
	if (!is_purgeable(msm_obj))
		return false;

	/*
	 * This will move the obj out of still_in_list to
	 * the purged list
	 */
	msm_gem_purge(&msm_obj->base);

	return true;
}

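/* Per-object callback for scan(): evict an object's backing pages to swap,
 * unless the object is currently unevictable.
 */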
static bool
evict(struct msm_gem_object *msm_obj)
{
	if (is_unevictable(msm_obj))
		return false;

	msm_gem_evict(&msm_obj->base);

	return true;
}

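/* Common list-walking helper for the shrinker paths: reclaim up to nr_to_scan
 * pages from objects on @list, applying @shrink to each object we can get a
 * reference on and lock, and return the number of pages reclaimed.  Objects
 * that are visited but not freed collect on still_in_list, which is spliced
 * back onto @list when the walk is done.
 */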
static unsigned long
scan(struct msm_drm_private *priv, unsigned nr_to_scan, struct list_head *list,
		bool (*shrink)(struct msm_gem_object *msm_obj))
{
	unsigned freed = 0;
	struct list_head still_in_list;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < nr_to_scan) {
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
				list, typeof(*msm_obj), mm_list);

		if (!msm_obj)
			break;

		list_move_tail(&msm_obj->mm_list, &still_in_list);

		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
			continue;

		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable)
		 */
		mutex_unlock(&priv->mm_lock);

		/*
		 * Note that this still needs to be trylock, since we can
		 * hit the shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held)
		 */
		if (!msm_gem_trylock(&msm_obj->base))
			goto tail;

		if (shrink(msm_obj))
			freed += msm_obj->base.size >> PAGE_SHIFT;

		msm_gem_unlock(&msm_obj->base);

tail:
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	}

	list_splice_tail(&still_in_list, list);
	mutex_unlock(&priv->mm_lock);

	return freed;
}

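/* Core shrinker reclaim callback: purge dontneed objects first, and if that
 * did not satisfy the request (and swap is usable), evict willneed objects.
 */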
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	unsigned long freed;

	freed = scan(priv, sc->nr_to_scan, &priv->inactive_dontneed, purge);

	if (freed > 0)
		trace_msm_gem_purge(freed << PAGE_SHIFT);

	if (can_swap() && freed < sc->nr_to_scan) {
		int evicted = scan(priv, sc->nr_to_scan - freed,
				&priv->inactive_willneed, evict);

		if (evicted > 0)
			trace_msm_gem_evict(evicted << PAGE_SHIFT);

		freed += evicted;
	}

	return (freed > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
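/* Debugfs hook to force a shrinker pass over nr_to_scan pages, wrapped in
 * fs_reclaim annotations so lockdep treats it like real reclaim.
 */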
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};
	int ret;

	fs_reclaim_acquire(GFP_KERNEL);
	ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return ret;
}
#endif

/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*
 */
static const int vmap_shrink_limit = 15;

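/* Per-object callback for scan(): tear down an object's cached kernel vmap
 * if it is no longer in use, to give back vmalloc address space.
 */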
static bool
vmap_shrink(struct msm_gem_object *msm_obj)
{
	if (!is_vunmapable(msm_obj))
		return false;

	msm_gem_vunmap(&msm_obj->base);

	return true;
}

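/* vmap purge notifier callback: when the kernel is running low on vmalloc
 * address space, walk the object lists (including active objects) and tear
 * down cached kernel mappings, up to vmap_shrink_limit per invocation.
 */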
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	struct list_head *mm_lists[] = {
		&priv->inactive_dontneed,
		&priv->inactive_willneed,
		priv->gpu ? &priv->gpu->active_list : NULL,
		NULL,
	};
	unsigned idx, unmapped = 0;

	for (idx = 0; mm_lists[idx] && unmapped < vmap_shrink_limit; idx++) {
		unmapped += scan(priv, vmap_shrink_limit - unmapped,
				mm_lists[idx], vmap_shrink);
	}

	*(unsigned long *)ptr += unmapped;

	if (unmapped > 0)
		trace_msm_gem_purge_vmaps(unmapped);

	return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);
	}
}