2021-06-10 09:01:49 +02:00
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021 Intel Corporation
*/
2022-02-10 17:45:49 +02:00
# include <linux/shmem_fs.h>
2021-06-10 09:01:49 +02:00
# include <drm/ttm/ttm_bo_driver.h>
# include <drm/ttm/ttm_placement.h>
2022-02-28 12:36:04 +00:00
# include <drm/drm_buddy.h>
2021-06-10 09:01:49 +02:00
# include "i915_drv.h"
2022-02-28 12:36:04 +00:00
# include "i915_ttm_buddy_manager.h"
2021-06-10 09:01:49 +02:00
# include "intel_memory_region.h"
# include "intel_region_ttm.h"
2021-09-22 08:25:22 +02:00
# include "gem/i915_gem_mman.h"
2021-06-10 09:01:49 +02:00
# include "gem/i915_gem_object.h"
# include "gem/i915_gem_region.h"
# include "gem/i915_gem_ttm.h"
2021-11-04 12:07:17 +01:00
# include "gem/i915_gem_ttm_move.h"
2021-09-22 08:25:22 +02:00
# include "gem/i915_gem_ttm_pm.h"
2021-06-10 09:01:49 +02:00
# define I915_TTM_PRIO_PURGE 0
# define I915_TTM_PRIO_NO_PAGES 1
# define I915_TTM_PRIO_HAS_PAGES 2
2022-02-28 12:36:04 +00:00
# define I915_TTM_PRIO_NEEDS_CPU_ACCESS 3
2021-06-10 09:01:49 +02:00
2021-06-16 16:24:57 +01:00
/*
* Size of struct ttm_place vector in on - stack struct ttm_placement allocs
*/
# define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
2021-06-10 09:01:49 +02:00
/**
* struct i915_ttm_tt - TTM page vector with additional private information
* @ ttm : The base TTM page vector .
* @ dev : The struct device used for dma mapping and unmapping .
2021-11-01 13:24:44 +01:00
* @ cached_rsgt : The cached scatter - gather table .
2021-10-18 10:10:49 +01:00
* @ is_shmem : Set if using shmem .
* @ filp : The shmem file , if using shmem backend .
2021-06-10 09:01:49 +02:00
*
* Note that DMA may be going on right up to the point where the page -
* vector is unpopulated in delayed destroy . Hence keep the
* scatter - gather table mapped and cached up to that point . This is
* different from the cached gem object io scatter - gather table which
* doesn ' t have an associated dma mapping .
*/
struct i915_ttm_tt {
struct ttm_tt ttm ;
struct device * dev ;
2021-11-01 13:24:44 +01:00
struct i915_refct_sgt cached_rsgt ;
2021-10-18 10:10:49 +01:00
bool is_shmem ;
struct file * filp ;
2021-06-10 09:01:49 +02:00
} ;
2021-06-16 16:24:57 +01:00
static const struct ttm_place sys_placement_flags = {
. fpfn = 0 ,
. lpfn = 0 ,
. mem_type = I915_PL_SYSTEM ,
. flags = 0 ,
2021-06-10 09:01:49 +02:00
} ;
static struct ttm_placement i915_sys_placement = {
. num_placement = 1 ,
2021-06-16 16:24:57 +01:00
. placement = & sys_placement_flags ,
2021-06-10 09:01:49 +02:00
. num_busy_placement = 1 ,
2021-06-16 16:24:57 +01:00
. busy_placement = & sys_placement_flags ,
2021-06-10 09:01:49 +02:00
} ;
2021-09-22 08:25:22 +02:00
/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
	return &i915_sys_placement;
}
2021-06-18 15:25:15 +02:00
/*
 * Translate an error code returned by TTM into the negative errno
 * convention expected by GEM callers. 0 passes straight through.
 */
static int i915_ttm_err_to_gem(int err)
{
	/* Fastpath: success needs no translation. */
	if (!err)
		return 0;

	if (err == -EBUSY) {
		/*
		 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
		 * restart the operation, since we don't record the contending
		 * lock. We use -EAGAIN to restart.
		 */
		return -EAGAIN;
	}

	if (err == -ENOSPC) {
		/*
		 * Memory type / region is full, and we can't evict.
		 * Except possibly system, that returns -ENOMEM;
		 */
		return -ENXIO;
	}

	return err;
}
2021-06-16 16:24:57 +01:00
static enum ttm_caching
i915_ttm_select_tt_caching ( const struct drm_i915_gem_object * obj )
{
/*
2021-10-18 10:10:54 +01:00
* Objects only allowed in system get cached cpu - mappings , or when
* evicting lmem - only buffers to system for swapping . Other objects get
* WC mapping for now . Even if in system .
2021-06-16 16:24:57 +01:00
*/
2021-10-18 10:10:54 +01:00
if ( obj - > mm . n_placements < = 1 )
2021-06-16 16:24:57 +01:00
return ttm_cached ;
return ttm_write_combined ;
}
static void
i915_ttm_place_from_region ( const struct intel_memory_region * mr ,
2021-06-16 16:24:58 +01:00
struct ttm_place * place ,
unsigned int flags )
2021-06-16 16:24:57 +01:00
{
memset ( place , 0 , sizeof ( * place ) ) ;
place - > mem_type = intel_region_to_ttm_type ( mr ) ;
2021-06-16 16:24:58 +01:00
if ( flags & I915_BO_ALLOC_CONTIGUOUS )
2022-02-25 14:54:58 +00:00
place - > flags | = TTM_PL_FLAG_CONTIGUOUS ;
2022-02-25 14:54:57 +00:00
if ( mr - > io_size & & mr - > io_size < mr - > total ) {
2022-02-25 14:54:58 +00:00
if ( flags & I915_BO_ALLOC_GPU_ONLY ) {
place - > flags | = TTM_PL_FLAG_TOPDOWN ;
} else {
place - > fpfn = 0 ;
place - > lpfn = mr - > io_size > > PAGE_SHIFT ;
}
2022-02-25 14:54:57 +00:00
}
2021-06-16 16:24:57 +01:00
}
static void
i915_ttm_placement_from_obj ( const struct drm_i915_gem_object * obj ,
struct ttm_place * requested ,
struct ttm_place * busy ,
struct ttm_placement * placement )
{
unsigned int num_allowed = obj - > mm . n_placements ;
2021-06-16 16:24:58 +01:00
unsigned int flags = obj - > flags ;
2021-06-16 16:24:57 +01:00
unsigned int i ;
placement - > num_placement = 1 ;
i915_ttm_place_from_region ( num_allowed ? obj - > mm . placements [ 0 ] :
2021-06-16 16:24:58 +01:00
obj - > mm . region , requested , flags ) ;
2021-06-16 16:24:57 +01:00
/* Cache this on object? */
placement - > num_busy_placement = num_allowed ;
for ( i = 0 ; i < placement - > num_busy_placement ; + + i )
2021-06-16 16:24:58 +01:00
i915_ttm_place_from_region ( obj - > mm . placements [ i ] , busy + i , flags ) ;
2021-06-16 16:24:57 +01:00
if ( num_allowed = = 0 ) {
* busy = * requested ;
placement - > num_busy_placement = 1 ;
}
placement - > placement = requested ;
placement - > busy_placement = busy ;
}
2021-10-18 10:10:49 +01:00
static int i915_ttm_tt_shmem_populate ( struct ttm_device * bdev ,
struct ttm_tt * ttm ,
struct ttm_operation_ctx * ctx )
{
struct drm_i915_private * i915 = container_of ( bdev , typeof ( * i915 ) , bdev ) ;
struct intel_memory_region * mr = i915 - > mm . regions [ INTEL_MEMORY_SYSTEM ] ;
struct i915_ttm_tt * i915_tt = container_of ( ttm , typeof ( * i915_tt ) , ttm ) ;
const unsigned int max_segment = i915_sg_segment_size ( ) ;
2021-12-10 19:50:05 +00:00
const size_t size = ( size_t ) ttm - > num_pages < < PAGE_SHIFT ;
2021-10-18 10:10:49 +01:00
struct file * filp = i915_tt - > filp ;
struct sgt_iter sgt_iter ;
struct sg_table * st ;
struct page * page ;
unsigned long i ;
int err ;
if ( ! filp ) {
struct address_space * mapping ;
gfp_t mask ;
filp = shmem_file_setup ( " i915-shmem-tt " , size , VM_NORESERVE ) ;
if ( IS_ERR ( filp ) )
return PTR_ERR ( filp ) ;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE ;
mapping = filp - > f_mapping ;
mapping_set_gfp_mask ( mapping , mask ) ;
GEM_BUG_ON ( ! ( mapping_gfp_mask ( mapping ) & __GFP_RECLAIM ) ) ;
i915_tt - > filp = filp ;
}
2021-11-01 13:24:44 +01:00
st = & i915_tt - > cached_rsgt . table ;
err = shmem_sg_alloc_table ( i915 , st , size , mr , filp - > f_mapping ,
max_segment ) ;
if ( err )
return err ;
2021-10-18 10:10:49 +01:00
2021-11-01 13:24:44 +01:00
err = dma_map_sgtable ( i915_tt - > dev , st , DMA_BIDIRECTIONAL ,
DMA_ATTR_SKIP_CPU_SYNC ) ;
if ( err )
2021-10-18 10:10:49 +01:00
goto err_free_st ;
i = 0 ;
for_each_sgt_page ( page , sgt_iter , st )
ttm - > pages [ i + + ] = page ;
if ( ttm - > page_flags & TTM_TT_FLAG_SWAPPED )
ttm - > page_flags & = ~ TTM_TT_FLAG_SWAPPED ;
return 0 ;
err_free_st :
2021-11-01 13:24:44 +01:00
shmem_sg_free_table ( st , filp - > f_mapping , false , false ) ;
2021-10-18 10:10:49 +01:00
return err ;
}
static void i915_ttm_tt_shmem_unpopulate ( struct ttm_tt * ttm )
{
struct i915_ttm_tt * i915_tt = container_of ( ttm , typeof ( * i915_tt ) , ttm ) ;
bool backup = ttm - > page_flags & TTM_TT_FLAG_SWAPPED ;
2021-11-01 13:24:44 +01:00
struct sg_table * st = & i915_tt - > cached_rsgt . table ;
shmem_sg_free_table ( st , file_inode ( i915_tt - > filp ) - > i_mapping ,
backup , backup ) ;
}
2021-10-18 10:10:49 +01:00
2021-11-01 13:24:44 +01:00
static void i915_ttm_tt_release ( struct kref * ref )
{
struct i915_ttm_tt * i915_tt =
container_of ( ref , typeof ( * i915_tt ) , cached_rsgt . kref ) ;
struct sg_table * st = & i915_tt - > cached_rsgt . table ;
2021-10-18 10:10:49 +01:00
2021-11-01 13:24:44 +01:00
GEM_WARN_ON ( st - > sgl ) ;
kfree ( i915_tt ) ;
2021-10-18 10:10:49 +01:00
}
2021-11-01 13:24:44 +01:00
static const struct i915_refct_sgt_ops tt_rsgt_ops = {
	.release = i915_ttm_tt_release
};
2021-06-10 09:01:49 +02:00
/*
 * TTM backend ttm_tt constructor. Returns NULL for ghost objects (!obj)
 * or on allocation/init failure. Shrinkable cached objects are backed by
 * shmem and flagged EXTERNAL so TTM never swaps them itself.
 */
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
					 uint32_t page_flags)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
	enum ttm_caching caching;
	struct i915_ttm_tt *i915_tt;
	int ret;

	if (!obj)
		return NULL;

	i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
	if (!i915_tt)
		return NULL;

	if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
	    man->use_tt)
		page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

	caching = i915_ttm_select_tt_caching(obj);
	if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
		page_flags |= TTM_TT_FLAG_EXTERNAL |
			      TTM_TT_FLAG_EXTERNAL_MAPPABLE;
		i915_tt->is_shmem = true;
	}

	ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
	if (ret)
		goto err_free;

	__i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size,
			      &tt_rsgt_ops);

	i915_tt->dev = obj->base.dev->dev;

	return &i915_tt->ttm;

err_free:
	kfree(i915_tt);
	return NULL;
}
static int i915_ttm_tt_populate ( struct ttm_device * bdev ,
struct ttm_tt * ttm ,
struct ttm_operation_ctx * ctx )
{
struct i915_ttm_tt * i915_tt = container_of ( ttm , typeof ( * i915_tt ) , ttm ) ;
if ( i915_tt - > is_shmem )
return i915_ttm_tt_shmem_populate ( bdev , ttm , ctx ) ;
return ttm_pool_alloc ( & bdev - > pool , ttm , ctx ) ;
2021-06-10 09:01:49 +02:00
}
static void i915_ttm_tt_unpopulate ( struct ttm_device * bdev , struct ttm_tt * ttm )
{
struct i915_ttm_tt * i915_tt = container_of ( ttm , typeof ( * i915_tt ) , ttm ) ;
2021-11-01 13:24:44 +01:00
struct sg_table * st = & i915_tt - > cached_rsgt . table ;
if ( st - > sgl )
dma_unmap_sgtable ( i915_tt - > dev , st , DMA_BIDIRECTIONAL , 0 ) ;
2021-06-10 09:01:49 +02:00
2021-10-18 10:10:49 +01:00
if ( i915_tt - > is_shmem ) {
i915_ttm_tt_shmem_unpopulate ( ttm ) ;
} else {
2021-11-01 13:24:44 +01:00
sg_free_table ( st ) ;
2021-10-18 10:10:49 +01:00
ttm_pool_free ( & bdev - > pool , ttm ) ;
2021-06-10 09:01:49 +02:00
}
}
/* Destroy callback: drop the shmem file and the cached rsgt reference. */
static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

	if (i915_tt->filp)
		fput(i915_tt->filp);

	ttm_tt_fini(ttm);
	/* Final put frees i915_tt via i915_ttm_tt_release(). */
	i915_refct_sgt_put(&i915_tt->cached_rsgt);
}
static bool i915_ttm_eviction_valuable ( struct ttm_buffer_object * bo ,
const struct ttm_place * place )
{
struct drm_i915_gem_object * obj = i915_ttm_to_gem ( bo ) ;
2022-02-28 12:36:04 +00:00
struct ttm_resource * res = bo - > resource ;
2021-06-10 09:01:49 +02:00
2021-11-22 22:45:53 +01:00
if ( ! obj )
return false ;
2021-10-18 10:10:49 +01:00
/*
* EXTERNAL objects should never be swapped out by TTM , instead we need
* to handle that ourselves . TTM will already skip such objects for us ,
* but we would like to avoid grabbing locks for no good reason .
*/
if ( bo - > ttm & & bo - > ttm - > page_flags & TTM_TT_FLAG_EXTERNAL )
2021-11-22 10:41:47 +03:00
return false ;
2021-10-18 10:10:49 +01:00
2021-06-10 09:01:49 +02:00
/* Will do for now. Our pinned objects are still on TTM's LRU lists */
2022-02-28 12:36:04 +00:00
if ( ! i915_gem_object_evictable ( obj ) )
return false ;
switch ( res - > mem_type ) {
case I915_PL_LMEM0 : {
struct ttm_resource_manager * man =
ttm_manager_type ( bo - > bdev , res - > mem_type ) ;
struct i915_ttm_buddy_resource * bman_res =
to_ttm_buddy_resource ( res ) ;
struct drm_buddy * mm = bman_res - > mm ;
struct drm_buddy_block * block ;
if ( ! place - > fpfn & & ! place - > lpfn )
return true ;
GEM_BUG_ON ( ! place - > lpfn ) ;
/*
* If we just want something mappable then we can quickly check
* if the current victim resource is using any of the CPU
* visible portion .
*/
if ( ! place - > fpfn & &
place - > lpfn = = i915_ttm_buddy_man_visible_size ( man ) )
return bman_res - > used_visible_size > 0 ;
/* Real range allocation */
list_for_each_entry ( block , & bman_res - > blocks , link ) {
unsigned long fpfn =
drm_buddy_block_offset ( block ) > > PAGE_SHIFT ;
unsigned long lpfn = fpfn +
( drm_buddy_block_size ( mm , block ) > > PAGE_SHIFT ) ;
if ( place - > fpfn < lpfn & & place - > lpfn > fpfn )
return true ;
}
return false ;
} default :
break ;
}
return true ;
2021-06-10 09:01:49 +02:00
}
/* On eviction, always fall back to system memory placement. */
static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement)
{
	*placement = i915_sys_placement;
}
2021-11-04 12:07:17 +01:00
/**
* i915_ttm_free_cached_io_rsgt - Free object cached LMEM information
* @ obj : The GEM object
* This function frees any LMEM - related information that is cached on
* the object . For example the radix tree for fast page lookup and the
* cached refcounted sg - table
*/
void i915_ttm_free_cached_io_rsgt ( struct drm_i915_gem_object * obj )
2021-06-10 09:01:49 +02:00
{
2021-06-10 09:01:52 +02:00
struct radix_tree_iter iter ;
void __rcu * * slot ;
2021-11-01 13:24:44 +01:00
if ( ! obj - > ttm . cached_io_rsgt )
2021-06-10 09:01:52 +02:00
return ;
rcu_read_lock ( ) ;
radix_tree_for_each_slot ( slot , & obj - > ttm . get_io_page . radix , & iter , 0 )
radix_tree_delete ( & obj - > ttm . get_io_page . radix , iter . index ) ;
rcu_read_unlock ( ) ;
2021-11-01 13:24:44 +01:00
i915_refct_sgt_put ( obj - > ttm . cached_io_rsgt ) ;
obj - > ttm . cached_io_rsgt = NULL ;
2021-06-10 09:01:49 +02:00
}
2021-11-04 12:07:17 +01:00
/**
* i915_ttm_purge - Clear an object of its memory
* @ obj : The object
*
* This function is called to clear an object of it ' s memory when it is
* marked as not needed anymore .
*
* Return : 0 on success , negative error code on failure .
*/
int i915_ttm_purge ( struct drm_i915_gem_object * obj )
2021-06-10 09:01:49 +02:00
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
2021-10-18 10:10:49 +01:00
struct i915_ttm_tt * i915_tt =
container_of ( bo - > ttm , typeof ( * i915_tt ) , ttm ) ;
2021-06-10 09:01:49 +02:00
struct ttm_operation_ctx ctx = {
. interruptible = true ,
. no_wait_gpu = false ,
} ;
struct ttm_placement place = { } ;
int ret ;
if ( obj - > mm . madv = = __I915_MADV_PURGED )
2021-10-18 10:10:49 +01:00
return 0 ;
2021-06-10 09:01:49 +02:00
ret = ttm_bo_validate ( bo , & place , & ctx ) ;
2021-10-18 10:10:49 +01:00
if ( ret )
return ret ;
if ( bo - > ttm & & i915_tt - > filp ) {
/*
* The below fput ( which eventually calls shmem_truncate ) might
* be delayed by worker , so when directly called to purge the
* pages ( like by the shrinker ) we should try to be more
* aggressive and release the pages immediately .
*/
shmem_truncate_range ( file_inode ( i915_tt - > filp ) ,
0 , ( loff_t ) - 1 ) ;
fput ( fetch_and_zero ( & i915_tt - > filp ) ) ;
2021-06-10 09:01:49 +02:00
}
2021-10-18 10:10:49 +01:00
obj - > write_domain = 0 ;
obj - > read_domains = 0 ;
i915_ttm_adjust_gem_after_move ( obj ) ;
2021-11-01 13:24:44 +01:00
i915_ttm_free_cached_io_rsgt ( obj ) ;
2021-10-18 10:10:49 +01:00
obj - > mm . madv = __I915_MADV_PURGED ;
2021-11-04 12:07:17 +01:00
2021-10-18 10:10:49 +01:00
return 0 ;
}
2021-12-15 11:07:46 +00:00
static int i915_ttm_shrink ( struct drm_i915_gem_object * obj , unsigned int flags )
2021-10-18 10:10:49 +01:00
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
struct i915_ttm_tt * i915_tt =
container_of ( bo - > ttm , typeof ( * i915_tt ) , ttm ) ;
struct ttm_operation_ctx ctx = {
. interruptible = true ,
2021-12-15 11:07:46 +00:00
. no_wait_gpu = flags & I915_GEM_OBJECT_SHRINK_NO_GPU_WAIT ,
2021-10-18 10:10:49 +01:00
} ;
struct ttm_placement place = { } ;
int ret ;
if ( ! bo - > ttm | | bo - > resource - > mem_type ! = TTM_PL_SYSTEM )
return 0 ;
GEM_BUG_ON ( ! i915_tt - > is_shmem ) ;
if ( ! i915_tt - > filp )
return 0 ;
2021-11-22 22:45:52 +01:00
ret = ttm_bo_wait_ctx ( bo , & ctx ) ;
if ( ret )
return ret ;
2021-10-18 10:10:49 +01:00
switch ( obj - > mm . madv ) {
case I915_MADV_DONTNEED :
return i915_ttm_purge ( obj ) ;
case __I915_MADV_PURGED :
return 0 ;
}
if ( bo - > ttm - > page_flags & TTM_TT_FLAG_SWAPPED )
return 0 ;
bo - > ttm - > page_flags | = TTM_TT_FLAG_SWAPPED ;
ret = ttm_bo_validate ( bo , & place , & ctx ) ;
if ( ret ) {
bo - > ttm - > page_flags & = ~ TTM_TT_FLAG_SWAPPED ;
return ret ;
}
2021-12-15 11:07:46 +00:00
if ( flags & I915_GEM_OBJECT_SHRINK_WRITEBACK )
2021-10-18 10:10:49 +01:00
__shmem_writeback ( obj - > base . size , i915_tt - > filp - > f_mapping ) ;
return 0 ;
2021-06-10 09:01:49 +02:00
}
/* Called by TTM when the backing store goes away; skip for ghost objects. */
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

	if (likely(obj)) {
		__i915_gem_object_pages_fini(obj);
		i915_ttm_free_cached_io_rsgt(obj);
	}
}
2021-11-01 13:24:44 +01:00
static struct i915_refct_sgt * i915_ttm_tt_get_st ( struct ttm_tt * ttm )
2021-06-10 09:01:49 +02:00
{
struct i915_ttm_tt * i915_tt = container_of ( ttm , typeof ( * i915_tt ) , ttm ) ;
struct sg_table * st ;
int ret ;
2021-11-01 13:24:44 +01:00
if ( i915_tt - > cached_rsgt . table . sgl )
return i915_refct_sgt_get ( & i915_tt - > cached_rsgt ) ;
2021-06-10 09:01:49 +02:00
2021-11-01 13:24:44 +01:00
st = & i915_tt - > cached_rsgt . table ;
RDMA v5.15 merge window Pull Request
- Various cleanup and small features for rtrs
- kmap_local_page() conversions
- Driver updates and fixes for: efa, rxe, mlx5, hfi1, qed, hns
- Cache the IB subnet prefix
- Rework how CRC is calcuated in rxe
- Clean reference counting in iwpm's netlink
- Pull object allocation and lifecycle for user QPs to the uverbs core
code
- Several small hns features and continued general code cleanups
- Fix the scatterlist confusion of orig_nents/nents introduced in an
earlier patch creating the append operation
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmEudRgACgkQOG33FX4g
mxraJA//c6bMxrrTVrzmrtrkyYD4tYWE8RDfgvoyZtleZnnEOJeunCQWakQrpJSv
ukSnOGCA3PtnmRMdV54f/11YJ/7otxOJodSO7jWsIoBrqG/lISAdX8mn2iHhrvJ0
dIaFEFPLy0WqoMLCJVIYIupR0IStVHb/mWx0uYL4XnnoYKyt7f7K5JMZpNWMhDN2
ieJw0jfrvEYm8pipWuxUvB16XARlzAWQrjqLpMRI+jFRpbDVBY21dz2/LJvOJPrA
LcQ+XXsV/F659ibOAGm6bU4BMda8fE6Lw90B/gmhSswJ205NrdziF5cNYHP0QxcN
oMjrjSWWHc9GEE7MTipC2AH8e36qob16Q7CK+zHEJ+ds7R6/O/8XmED1L8/KFpNA
FGqnjxnxsl1y27mUegfj1Hh8PfoDp2oVq0lmpEw0CYo4cfVzHSMRrbTR//XmW628
Ie/mJddpFK4oLk+QkSNjSLrnxOvdTkdA58PU0i84S5eUVMNm41jJDkxg2J7vp0Zn
sclZsclhUQ9oJ5Q2so81JMWxu4JDn7IByXL0ULBaa6xwQTiVEnyvSxSuPlflhLRW
0vI2ylATYKyWkQqyX7VyWecZJzwhwZj5gMMWmoGsij8bkZhQ/VaQMaesByzSth+h
NV5UAYax4GqyOQ/tg/tqT6e5nrI1zof87H64XdTCBpJ7kFyQ/oA=
=ZwOe
-----END PGP SIGNATURE-----
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This is quite a small cycle, no major series stands out. The HNS and
rxe drivers saw the most activity this cycle, with rxe being broken
for a good chunk of time. The significant deleted line count is due to
a SPDX cleanup series.
Summary:
- Various cleanup and small features for rtrs
- kmap_local_page() conversions
- Driver updates and fixes for: efa, rxe, mlx5, hfi1, qed, hns
- Cache the IB subnet prefix
- Rework how CRC is calcuated in rxe
- Clean reference counting in iwpm's netlink
- Pull object allocation and lifecycle for user QPs to the uverbs
core code
- Several small hns features and continued general code cleanups
- Fix the scatterlist confusion of orig_nents/nents introduced in an
earlier patch creating the append operation"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (90 commits)
RDMA/mlx5: Relax DCS QP creation checks
RDMA/hns: Delete unnecessary blank lines.
RDMA/hns: Encapsulate the qp db as a function
RDMA/hns: Adjust the order in which irq are requested and enabled
RDMA/hns: Remove RST2RST error prints for hw v1
RDMA/hns: Remove dqpn filling when modify qp from Init to Init
RDMA/hns: Fix QP's resp incomplete assignment
RDMA/hns: Fix query destination qpn
RDMA/hfi1: Convert to SPDX identifier
IB/rdmavt: Convert to SPDX identifier
RDMA/hns: Bugfix for incorrect association between dip_idx and dgid
RDMA/hns: Bugfix for the missing assignment for dip_idx
RDMA/hns: Bugfix for data type of dip_idx
RDMA/hns: Fix incorrect lsn field
RDMA/irdma: Remove the repeated declaration
RDMA/core/sa_query: Retry SA queries
RDMA: Use the sg_table directly and remove the opencoded version from umem
lib/scatterlist: Fix wrong update of orig_nents
lib/scatterlist: Provide a dedicated function to support table append
RDMA/hns: Delete unused hns bitmap interface
...
2021-09-02 14:47:21 -07:00
ret = sg_alloc_table_from_pages_segment ( st ,
ttm - > pages , ttm - > num_pages ,
0 , ( unsigned long ) ttm - > num_pages < < PAGE_SHIFT ,
i915_sg_segment_size ( ) , GFP_KERNEL ) ;
if ( ret ) {
2021-11-01 13:24:44 +01:00
st - > sgl = NULL ;
RDMA v5.15 merge window Pull Request
- Various cleanup and small features for rtrs
- kmap_local_page() conversions
- Driver updates and fixes for: efa, rxe, mlx5, hfi1, qed, hns
- Cache the IB subnet prefix
- Rework how CRC is calcuated in rxe
- Clean reference counting in iwpm's netlink
- Pull object allocation and lifecycle for user QPs to the uverbs core
code
- Several small hns features and continued general code cleanups
- Fix the scatterlist confusion of orig_nents/nents introduced in an
earlier patch creating the append operation
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmEudRgACgkQOG33FX4g
mxraJA//c6bMxrrTVrzmrtrkyYD4tYWE8RDfgvoyZtleZnnEOJeunCQWakQrpJSv
ukSnOGCA3PtnmRMdV54f/11YJ/7otxOJodSO7jWsIoBrqG/lISAdX8mn2iHhrvJ0
dIaFEFPLy0WqoMLCJVIYIupR0IStVHb/mWx0uYL4XnnoYKyt7f7K5JMZpNWMhDN2
ieJw0jfrvEYm8pipWuxUvB16XARlzAWQrjqLpMRI+jFRpbDVBY21dz2/LJvOJPrA
LcQ+XXsV/F659ibOAGm6bU4BMda8fE6Lw90B/gmhSswJ205NrdziF5cNYHP0QxcN
oMjrjSWWHc9GEE7MTipC2AH8e36qob16Q7CK+zHEJ+ds7R6/O/8XmED1L8/KFpNA
FGqnjxnxsl1y27mUegfj1Hh8PfoDp2oVq0lmpEw0CYo4cfVzHSMRrbTR//XmW628
Ie/mJddpFK4oLk+QkSNjSLrnxOvdTkdA58PU0i84S5eUVMNm41jJDkxg2J7vp0Zn
sclZsclhUQ9oJ5Q2so81JMWxu4JDn7IByXL0ULBaa6xwQTiVEnyvSxSuPlflhLRW
0vI2ylATYKyWkQqyX7VyWecZJzwhwZj5gMMWmoGsij8bkZhQ/VaQMaesByzSth+h
NV5UAYax4GqyOQ/tg/tqT6e5nrI1zof87H64XdTCBpJ7kFyQ/oA=
=ZwOe
-----END PGP SIGNATURE-----
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
"This is quite a small cycle, no major series stands out. The HNS and
rxe drivers saw the most activity this cycle, with rxe being broken
for a good chunk of time. The significant deleted line count is due to
a SPDX cleanup series.
Summary:
- Various cleanup and small features for rtrs
- kmap_local_page() conversions
- Driver updates and fixes for: efa, rxe, mlx5, hfi1, qed, hns
- Cache the IB subnet prefix
- Rework how CRC is calcuated in rxe
- Clean reference counting in iwpm's netlink
- Pull object allocation and lifecycle for user QPs to the uverbs
core code
- Several small hns features and continued general code cleanups
- Fix the scatterlist confusion of orig_nents/nents introduced in an
earlier patch creating the append operation"
* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (90 commits)
RDMA/mlx5: Relax DCS QP creation checks
RDMA/hns: Delete unnecessary blank lines.
RDMA/hns: Encapsulate the qp db as a function
RDMA/hns: Adjust the order in which irq are requested and enabled
RDMA/hns: Remove RST2RST error prints for hw v1
RDMA/hns: Remove dqpn filling when modify qp from Init to Init
RDMA/hns: Fix QP's resp incomplete assignment
RDMA/hns: Fix query destination qpn
RDMA/hfi1: Convert to SPDX identifier
IB/rdmavt: Convert to SPDX identifier
RDMA/hns: Bugfix for incorrect association between dip_idx and dgid
RDMA/hns: Bugfix for the missing assignment for dip_idx
RDMA/hns: Bugfix for data type of dip_idx
RDMA/hns: Fix incorrect lsn field
RDMA/irdma: Remove the repeated declaration
RDMA/core/sa_query: Retry SA queries
RDMA: Use the sg_table directly and remove the opencoded version from umem
lib/scatterlist: Fix wrong update of orig_nents
lib/scatterlist: Provide a dedicated function to support table append
RDMA/hns: Delete unused hns bitmap interface
...
2021-09-02 14:47:21 -07:00
return ERR_PTR ( ret ) ;
2021-06-10 09:01:49 +02:00
}
ret = dma_map_sgtable ( i915_tt - > dev , st , DMA_BIDIRECTIONAL , 0 ) ;
if ( ret ) {
sg_free_table ( st ) ;
return ERR_PTR ( ret ) ;
}
2021-11-01 13:24:44 +01:00
return i915_refct_sgt_get ( & i915_tt - > cached_rsgt ) ;
2021-06-10 09:01:49 +02:00
}
2021-11-04 12:07:17 +01:00
/**
* i915_ttm_resource_get_st - Get a refcounted sg - table pointing to the
* resource memory
* @ obj : The GEM object used for sg - table caching
* @ res : The struct ttm_resource for which an sg - table is requested .
*
* This function returns a refcounted sg - table representing the memory
* pointed to by @ res . If @ res is the object ' s current resource it may also
* cache the sg_table on the object or attempt to access an already cached
* sg - table . The refcounted sg - table needs to be put when no - longer in use .
*
* Return : A valid pointer to a struct i915_refct_sgt or error pointer on
* failure .
*/
struct i915_refct_sgt *
2021-06-10 09:01:49 +02:00
i915_ttm_resource_get_st ( struct drm_i915_gem_object * obj ,
struct ttm_resource * res )
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
2021-11-04 12:07:17 +01:00
if ( ! i915_ttm_gtt_binds_lmem ( res ) )
2021-06-10 09:01:49 +02:00
return i915_ttm_tt_get_st ( bo - > ttm ) ;
2021-06-24 10:42:39 +02:00
/*
* If CPU mapping differs , we need to add the ttm_tt pages to
* the resulting st . Might make sense for GGTT .
*/
2021-11-04 12:07:17 +01:00
GEM_WARN_ON ( ! i915_ttm_cpu_maps_iomem ( res ) ) ;
2021-11-01 13:24:44 +01:00
if ( bo - > resource = = res ) {
if ( ! obj - > ttm . cached_io_rsgt ) {
struct i915_refct_sgt * rsgt ;
rsgt = intel_region_ttm_resource_to_rsgt ( obj - > mm . region ,
res ) ;
if ( IS_ERR ( rsgt ) )
return rsgt ;
obj - > ttm . cached_io_rsgt = rsgt ;
}
return i915_refct_sgt_get ( obj - > ttm . cached_io_rsgt ) ;
}
return intel_region_ttm_resource_to_rsgt ( obj - > mm . region , res ) ;
2021-06-10 09:01:49 +02:00
}
2022-01-06 17:49:10 +00:00
static int i915_ttm_truncate ( struct drm_i915_gem_object * obj )
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
int err ;
WARN_ON_ONCE ( obj - > mm . madv = = I915_MADV_WILLNEED ) ;
err = i915_ttm_move_notify ( bo ) ;
if ( err )
return err ;
return i915_ttm_purge ( obj ) ;
}
2021-11-04 12:07:17 +01:00
static void i915_ttm_swap_notify ( struct ttm_buffer_object * bo )
2021-06-10 09:01:49 +02:00
{
struct drm_i915_gem_object * obj = i915_ttm_to_gem ( bo ) ;
2021-11-22 22:45:53 +01:00
int ret ;
if ( ! obj )
return ;
2021-06-10 09:01:49 +02:00
2021-11-22 22:45:53 +01:00
ret = i915_ttm_move_notify ( bo ) ;
2021-11-04 12:07:17 +01:00
GEM_WARN_ON ( ret ) ;
GEM_WARN_ON ( obj - > ttm . cached_io_rsgt ) ;
if ( ! ret & & obj - > mm . madv ! = I915_MADV_WILLNEED )
2021-06-10 09:01:49 +02:00
i915_ttm_purge ( obj ) ;
}
2022-02-28 12:36:05 +00:00
static bool i915_ttm_resource_mappable ( struct ttm_resource * res )
{
struct i915_ttm_buddy_resource * bman_res = to_ttm_buddy_resource ( res ) ;
if ( ! i915_ttm_cpu_maps_iomem ( res ) )
return true ;
return bman_res - > used_visible_size = = bman_res - > base . num_pages ;
}
2021-06-10 09:01:52 +02:00
static int i915_ttm_io_mem_reserve ( struct ttm_device * bdev , struct ttm_resource * mem )
{
2021-11-04 12:07:17 +01:00
if ( ! i915_ttm_cpu_maps_iomem ( mem ) )
2021-06-10 09:01:52 +02:00
return 0 ;
2022-02-28 12:36:05 +00:00
if ( ! i915_ttm_resource_mappable ( mem ) )
return - EINVAL ;
2021-06-10 09:01:52 +02:00
mem - > bus . caching = ttm_write_combined ;
mem - > bus . is_iomem = true ;
return 0 ;
}
static unsigned long i915_ttm_io_mem_pfn ( struct ttm_buffer_object * bo ,
unsigned long page_offset )
{
struct drm_i915_gem_object * obj = i915_ttm_to_gem ( bo ) ;
struct scatterlist * sg ;
2021-11-22 22:45:53 +01:00
unsigned long base ;
2021-06-10 09:01:52 +02:00
unsigned int ofs ;
2021-11-22 22:45:53 +01:00
GEM_BUG_ON ( ! obj ) ;
2021-06-10 09:01:52 +02:00
GEM_WARN_ON ( bo - > ttm ) ;
2021-11-22 22:45:53 +01:00
base = obj - > mm . region - > iomap . base - obj - > mm . region - > region . start ;
2021-07-14 14:34:17 -05:00
sg = __i915_gem_object_get_sg ( obj , & obj - > ttm . get_io_page , page_offset , & ofs , true ) ;
2021-06-10 09:01:52 +02:00
return ( ( base + sg_dma_address ( sg ) ) > > PAGE_SHIFT ) + ofs ;
}
2021-11-22 22:45:53 +01:00
/*
* All callbacks need to take care not to downcast a struct ttm_buffer_object
* without checking its subclass , since it might be a TTM ghost object .
*/
2021-06-10 09:01:49 +02:00
static struct ttm_device_funcs i915_ttm_bo_driver = {
. ttm_tt_create = i915_ttm_tt_create ,
2021-10-18 10:10:49 +01:00
. ttm_tt_populate = i915_ttm_tt_populate ,
2021-06-10 09:01:49 +02:00
. ttm_tt_unpopulate = i915_ttm_tt_unpopulate ,
. ttm_tt_destroy = i915_ttm_tt_destroy ,
. eviction_valuable = i915_ttm_eviction_valuable ,
. evict_flags = i915_ttm_evict_flags ,
. move = i915_ttm_move ,
. swap_notify = i915_ttm_swap_notify ,
. delete_mem_notify = i915_ttm_delete_mem_notify ,
2021-06-10 09:01:52 +02:00
. io_mem_reserve = i915_ttm_io_mem_reserve ,
. io_mem_pfn = i915_ttm_io_mem_pfn ,
2021-06-10 09:01:49 +02:00
} ;
/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
	return &i915_ttm_bo_driver;
}
2021-06-29 17:12:01 +02:00
static int __i915_ttm_get_pages ( struct drm_i915_gem_object * obj ,
struct ttm_placement * placement )
2021-06-10 09:01:49 +02:00
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
struct ttm_operation_ctx ctx = {
. interruptible = true ,
. no_wait_gpu = false ,
} ;
2021-06-18 15:25:15 +02:00
int real_num_busy ;
2021-06-10 09:01:49 +02:00
int ret ;
2021-06-18 15:25:15 +02:00
/* First try only the requested placement. No eviction. */
2021-06-29 17:12:01 +02:00
real_num_busy = fetch_and_zero ( & placement - > num_busy_placement ) ;
ret = ttm_bo_validate ( bo , placement , & ctx ) ;
2021-06-18 15:25:15 +02:00
if ( ret ) {
ret = i915_ttm_err_to_gem ( ret ) ;
/*
* Anything that wants to restart the operation gets to
* do that .
*/
if ( ret = = - EDEADLK | | ret = = - EINTR | | ret = = - ERESTARTSYS | |
ret = = - EAGAIN )
return ret ;
2021-06-10 09:01:49 +02:00
2021-06-18 15:25:15 +02:00
/*
* If the initial attempt fails , allow all accepted placements ,
* evicting if necessary .
*/
2021-06-29 17:12:01 +02:00
placement - > num_busy_placement = real_num_busy ;
ret = ttm_bo_validate ( bo , placement , & ctx ) ;
2021-06-18 15:25:15 +02:00
if ( ret )
return i915_ttm_err_to_gem ( ret ) ;
}
2021-06-10 09:01:49 +02:00
2021-06-24 10:42:39 +02:00
if ( bo - > ttm & & ! ttm_tt_is_populated ( bo - > ttm ) ) {
ret = ttm_tt_populate ( bo - > bdev , bo - > ttm , & ctx ) ;
if ( ret )
return ret ;
i915_ttm_adjust_domains_after_move ( obj ) ;
i915_ttm_adjust_gem_after_move ( obj ) ;
}
2021-07-23 12:21:39 -05:00
if ( ! i915_gem_object_has_pages ( obj ) ) {
2021-11-01 13:24:44 +01:00
struct i915_refct_sgt * rsgt =
i915_ttm_resource_get_st ( obj , bo - > resource ) ;
if ( IS_ERR ( rsgt ) )
return PTR_ERR ( rsgt ) ;
2021-06-10 09:01:49 +02:00
2021-11-01 13:24:44 +01:00
GEM_BUG_ON ( obj - > mm . rsgt ) ;
obj - > mm . rsgt = rsgt ;
__i915_gem_object_set_pages ( obj , & rsgt - > table ,
i915_sg_dma_sizes ( rsgt - > table . sgl ) ) ;
2021-07-23 12:21:39 -05:00
}
2021-06-10 09:01:49 +02:00
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
i915_ttm_adjust_lru ( obj ) ;
2021-06-10 09:01:49 +02:00
return ret ;
}
2021-06-29 17:12:01 +02:00
static int i915_ttm_get_pages ( struct drm_i915_gem_object * obj )
{
struct ttm_place requested , busy [ I915_TTM_MAX_PLACEMENTS ] ;
struct ttm_placement placement ;
GEM_BUG_ON ( obj - > mm . n_placements > I915_TTM_MAX_PLACEMENTS ) ;
/* Move to the requested placement. */
i915_ttm_placement_from_obj ( obj , & requested , busy , & placement ) ;
return __i915_ttm_get_pages ( obj , & placement ) ;
}
/**
* DOC : Migration vs eviction
*
* GEM migration may not be the same as TTM migration / eviction . If
* the TTM core decides to evict an object it may be evicted to a
* TTM memory type that is not in the object ' s allowable GEM regions , or
* in fact theoretically to a TTM memory type that doesn ' t correspond to
* a GEM memory region . In that case the object ' s GEM region is not
* updated , and the data is migrated back to the GEM region at
* get_pages time . TTM may however set up CPU ptes to the object even
* when it is evicted .
* Gem forced migration using the i915_ttm_migrate ( ) op , is allowed even
* to regions that are not in the object ' s list of allowable placements .
*/
2022-02-28 12:36:05 +00:00
static int __i915_ttm_migrate ( struct drm_i915_gem_object * obj ,
struct intel_memory_region * mr ,
unsigned int flags )
2021-06-29 17:12:01 +02:00
{
struct ttm_place requested ;
struct ttm_placement placement ;
int ret ;
2022-02-28 12:36:05 +00:00
i915_ttm_place_from_region ( mr , & requested , flags ) ;
2021-06-29 17:12:01 +02:00
placement . num_placement = 1 ;
placement . num_busy_placement = 1 ;
placement . placement = & requested ;
placement . busy_placement = & requested ;
ret = __i915_ttm_get_pages ( obj , & placement ) ;
if ( ret )
return ret ;
/*
* Reinitialize the region bindings . This is primarily
* required for objects where the new region is not in
* its allowable placements .
*/
if ( obj - > mm . region ! = mr ) {
i915_gem_object_release_memory_region ( obj ) ;
i915_gem_object_init_memory_region ( obj , mr ) ;
}
return 0 ;
}
2022-02-28 12:36:05 +00:00
static int i915_ttm_migrate ( struct drm_i915_gem_object * obj ,
struct intel_memory_region * mr )
{
return __i915_ttm_migrate ( obj , mr , obj - > flags ) ;
}
2021-06-10 09:01:49 +02:00
static void i915_ttm_put_pages ( struct drm_i915_gem_object * obj ,
struct sg_table * st )
{
/*
* We ' re currently not called from a shrinker , so put_pages ( )
* typically means the object is about to destroyed , or called
* from move_notify ( ) . So just avoid doing much for now .
* If the object is not destroyed next , The TTM eviction logic
* and shrinkers will move it out if needed .
*/
2021-11-01 13:24:44 +01:00
if ( obj - > mm . rsgt )
i915_refct_sgt_put ( fetch_and_zero ( & obj - > mm . rsgt ) ) ;
2021-06-10 09:01:49 +02:00
}
2021-11-04 12:07:17 +01:00
/**
* i915_ttm_adjust_lru - Adjust an object ' s position on relevant LRU lists .
* @ obj : The object
*/
void i915_ttm_adjust_lru ( struct drm_i915_gem_object * obj )
2021-06-10 09:01:49 +02:00
{
struct ttm_buffer_object * bo = i915_gem_to_ttm ( obj ) ;
2021-10-18 10:10:49 +01:00
struct i915_ttm_tt * i915_tt =
container_of ( bo - > ttm , typeof ( * i915_tt ) , ttm ) ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
bool shrinkable =
bo - > ttm & & i915_tt - > filp & & ttm_tt_is_populated ( bo - > ttm ) ;
2021-06-10 09:01:49 +02:00
/*
* Don ' t manipulate the TTM LRUs while in TTM bo destruction .
* We ' re called through i915_ttm_delete_mem_notify ( ) .
*/
if ( ! kref_read ( & bo - > kref ) )
return ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
/*
* We skip managing the shrinker LRU in set_pages ( ) and just manage
* everything here . This does at least solve the issue with having
* temporary shmem mappings ( like with evicted lmem ) not being visible to
* the shrinker . Only our shmem objects are shrinkable , everything else
* we keep as unshrinkable .
*
* To make sure everything plays nice we keep an extra shrink pin in TTM
* if the underlying pages are not currently shrinkable . Once we release
* our pin , like when the pages are moved to shmem , the pages will then
* be added to the shrinker LRU , assuming the caller isn ' t also holding
* a pin .
*
* TODO : consider maybe also bumping the shrinker list here when we have
* already unpinned it , which should give us something more like an LRU .
2021-11-10 09:55:27 +01:00
*
* TODO : There is a small window of opportunity for this function to
* get called from eviction after we ' ve dropped the last GEM refcount ,
* but before the TTM deleted flag is set on the object . Avoid
* adjusting the shrinker list in such cases , since the object is
* not available to the shrinker anyway due to its zero refcount .
* To fix this properly we should move to a TTM shrinker LRU list for
* these objects .
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
*/
2021-11-10 09:55:27 +01:00
if ( kref_get_unless_zero ( & obj - > base . refcount ) ) {
if ( shrinkable ! = obj - > mm . ttm_shrinkable ) {
if ( shrinkable ) {
if ( obj - > mm . madv = = I915_MADV_WILLNEED )
__i915_gem_object_make_shrinkable ( obj ) ;
else
__i915_gem_object_make_purgeable ( obj ) ;
} else {
i915_gem_object_make_unshrinkable ( obj ) ;
}
obj - > mm . ttm_shrinkable = shrinkable ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
}
2021-11-10 09:55:27 +01:00
i915_gem_object_put ( obj ) ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
}
2021-06-10 09:01:49 +02:00
/*
* Put on the correct LRU list depending on the MADV status
*/
spin_lock ( & bo - > bdev - > lru_lock ) ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
if ( shrinkable ) {
2021-10-18 10:10:49 +01:00
/* Try to keep shmem_tt from being considered for shrinking. */
bo - > priority = TTM_MAX_BO_PRIORITY - 1 ;
} else if ( obj - > mm . madv ! = I915_MADV_WILLNEED ) {
2021-06-10 09:01:49 +02:00
bo - > priority = I915_TTM_PRIO_PURGE ;
} else if ( ! i915_gem_object_has_pages ( obj ) ) {
2022-02-09 11:16:52 +00:00
bo - > priority = I915_TTM_PRIO_NO_PAGES ;
2021-06-10 09:01:49 +02:00
} else {
2022-02-28 12:36:04 +00:00
struct ttm_resource_manager * man =
ttm_manager_type ( bo - > bdev , bo - > resource - > mem_type ) ;
/*
* If we need to place an LMEM resource which doesn ' t need CPU
* access then we should try not to victimize mappable objects
* first , since we likely end up stealing more of the mappable
* portion . And likewise when we try to find space for a mappble
* object , we know not to ever victimize objects that don ' t
* occupy any mappable pages .
*/
if ( i915_ttm_cpu_maps_iomem ( bo - > resource ) & &
i915_ttm_buddy_man_visible_size ( man ) < man - > size & &
! ( obj - > flags & I915_BO_ALLOC_GPU_ONLY ) )
bo - > priority = I915_TTM_PRIO_NEEDS_CPU_ACCESS ;
else
bo - > priority = I915_TTM_PRIO_HAS_PAGES ;
2021-06-10 09:01:49 +02:00
}
ttm_bo_move_to_lru_tail ( bo , bo - > resource , NULL ) ;
spin_unlock ( & bo - > bdev - > lru_lock ) ;
}
/*
* TTM - backed gem object destruction requires some clarification .
* Basically we have two possibilities here . We can either rely on the
* i915 delayed destruction and put the TTM object when the object
* is idle . This would be detected by TTM which would bypass the
* TTM delayed destroy handling . The other approach is to put the TTM
* object early and rely on the TTM destroyed handling , and then free
* the leftover parts of the GEM object once TTM ' s destroyed list handling is
* complete . For now , we rely on the latter for two reasons :
* a ) TTM can evict an object even when it ' s on the delayed destroy list ,
* which in theory allows for complete eviction .
* b ) There is work going on in TTM to allow freeing an object even when
* it ' s not idle , and using the TTM destroyed list handling could help us
* benefit from that .
*/
static void i915_ttm_delayed_free ( struct drm_i915_gem_object * obj )
{
2021-09-30 13:32:36 +02:00
GEM_BUG_ON ( ! obj - > ttm . created ) ;
ttm_bo_put ( i915_gem_to_ttm ( obj ) ) ;
2021-06-10 09:01:49 +02:00
}
2021-06-10 09:01:52 +02:00
static vm_fault_t vm_fault_ttm ( struct vm_fault * vmf )
{
struct vm_area_struct * area = vmf - > vma ;
2021-11-22 22:45:53 +01:00
struct ttm_buffer_object * bo = area - > vm_private_data ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
struct drm_device * dev = bo - > base . dev ;
2021-11-22 22:45:53 +01:00
struct drm_i915_gem_object * obj ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
vm_fault_t ret ;
int idx ;
2021-06-10 09:01:52 +02:00
2021-11-22 22:45:53 +01:00
obj = i915_ttm_to_gem ( bo ) ;
if ( ! obj )
return VM_FAULT_SIGBUS ;
2021-06-10 09:01:52 +02:00
/* Sanity check that we allow writing into this object */
if ( unlikely ( i915_gem_object_is_readonly ( obj ) & &
area - > vm_flags & VM_WRITE ) )
return VM_FAULT_SIGBUS ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
ret = ttm_bo_vm_reserve ( bo , vmf ) ;
if ( ret )
return ret ;
2022-01-06 17:49:08 +00:00
if ( obj - > mm . madv ! = I915_MADV_WILLNEED ) {
dma_resv_unlock ( bo - > base . resv ) ;
return VM_FAULT_SIGBUS ;
}
2022-02-28 12:36:05 +00:00
if ( ! i915_ttm_resource_mappable ( bo - > resource ) ) {
int err = - ENODEV ;
int i ;
for ( i = 0 ; i < obj - > mm . n_placements ; i + + ) {
struct intel_memory_region * mr = obj - > mm . placements [ i ] ;
unsigned int flags ;
if ( ! mr - > io_size & & mr - > type ! = INTEL_MEMORY_SYSTEM )
continue ;
flags = obj - > flags ;
flags & = ~ I915_BO_ALLOC_GPU_ONLY ;
err = __i915_ttm_migrate ( obj , mr , flags ) ;
if ( ! err )
break ;
}
if ( err ) {
drm_dbg ( dev , " Unable to make resource CPU accessible \n " ) ;
dma_resv_unlock ( bo - > base . resv ) ;
return VM_FAULT_SIGBUS ;
}
}
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
if ( drm_dev_enter ( dev , & idx ) ) {
ret = ttm_bo_vm_fault_reserved ( vmf , vmf - > vma - > vm_page_prot ,
2021-11-23 12:58:14 +00:00
TTM_BO_VM_NUM_PREFAULT ) ;
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
drm_dev_exit ( idx ) ;
} else {
ret = ttm_bo_vm_dummy_page ( vmf , vmf - > vma - > vm_page_prot ) ;
}
if ( ret = = VM_FAULT_RETRY & & ! ( vmf - > flags & FAULT_FLAG_RETRY_NOWAIT ) )
return ret ;
i915_ttm_adjust_lru ( obj ) ;
dma_resv_unlock ( bo - > base . resv ) ;
return ret ;
2021-06-10 09:01:52 +02:00
}
static int
vm_access_ttm ( struct vm_area_struct * area , unsigned long addr ,
void * buf , int len , int write )
{
struct drm_i915_gem_object * obj =
i915_ttm_to_gem ( area - > vm_private_data ) ;
if ( i915_gem_object_is_readonly ( obj ) & & write )
return - EACCES ;
return ttm_bo_vm_access ( area , addr , buf , len , write ) ;
}
static void ttm_vm_open ( struct vm_area_struct * vma )
{
struct drm_i915_gem_object * obj =
i915_ttm_to_gem ( vma - > vm_private_data ) ;
GEM_BUG_ON ( ! obj ) ;
i915_gem_object_get ( obj ) ;
}
static void ttm_vm_close ( struct vm_area_struct * vma )
{
struct drm_i915_gem_object * obj =
i915_ttm_to_gem ( vma - > vm_private_data ) ;
GEM_BUG_ON ( ! obj ) ;
i915_gem_object_put ( obj ) ;
}
/* VM operations for user mmaps of TTM-backed i915 GEM objects. */
static const struct vm_operations_struct vm_ops_ttm = {
	.fault = vm_fault_ttm,
	.access = vm_access_ttm,
	.open = ttm_vm_open,
	.close = ttm_vm_close,
};
static u64 i915_ttm_mmap_offset ( struct drm_i915_gem_object * obj )
{
/* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
GEM_BUG_ON ( ! drm_mm_node_allocated ( & obj - > base . vma_node . vm_node ) ) ;
return drm_vma_node_offset_addr ( & obj - > base . vma_node ) ;
}
2022-01-06 17:49:09 +00:00
static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
{
	/* Zap any CPU PTEs currently mapping this object. */
	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
}
2021-06-23 15:34:11 +01:00
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
2021-06-10 09:01:49 +02:00
. name = " i915_gem_object_ttm " ,
2021-10-18 10:10:55 +01:00
. flags = I915_GEM_OBJECT_IS_SHRINKABLE |
I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST ,
2021-06-10 09:01:49 +02:00
. get_pages = i915_ttm_get_pages ,
. put_pages = i915_ttm_put_pages ,
2022-01-06 17:49:10 +00:00
. truncate = i915_ttm_truncate ,
2021-12-15 11:07:46 +00:00
. shrink = i915_ttm_shrink ,
2021-10-18 10:10:49 +01:00
2021-06-10 09:01:49 +02:00
. adjust_lru = i915_ttm_adjust_lru ,
. delayed_free = i915_ttm_delayed_free ,
2021-06-29 17:12:01 +02:00
. migrate = i915_ttm_migrate ,
2021-10-18 10:10:49 +01:00
2021-06-10 09:01:52 +02:00
. mmap_offset = i915_ttm_mmap_offset ,
2022-01-06 17:49:09 +00:00
. unmap_virtual = i915_ttm_unmap_virtual ,
2021-06-10 09:01:52 +02:00
. mmap_ops = & vm_ops_ttm ,
2021-06-10 09:01:49 +02:00
} ;
void i915_ttm_bo_destroy ( struct ttm_buffer_object * bo )
{
struct drm_i915_gem_object * obj = i915_ttm_to_gem ( bo ) ;
i915_gem_object_release_memory_region ( obj ) ;
2021-06-10 09:01:52 +02:00
mutex_destroy ( & obj - > ttm . get_io_page . lock ) ;
2021-08-30 14:09:48 +02:00
2021-09-30 13:32:36 +02:00
if ( obj - > ttm . created ) {
drm/i915/ttm: move shrinker management into adjust_lru
We currently just evict lmem objects to system memory when under memory
pressure. For this case we might lack the usual object mm.pages, which
effectively hides the pages from the i915-gem shrinker, until we
actually "attach" the TT to the object, or in the case of lmem-only
objects it just gets migrated back to lmem when touched again.
For all cases we can just adjust the i915 shrinker LRU each time we also
adjust the TTM LRU. The two cases we care about are:
1) When something is moved by TTM, including when initially populating
an object. Importantly this covers the case where TTM moves something from
lmem <-> smem, outside of the normal get_pages() interface, which
should still ensure the shmem pages underneath are reclaimable.
2) When calling into i915_gem_object_unlock(). The unlock should
ensure the object is removed from the shinker LRU, if it was indeed
swapped out, or just purged, when the shrinker drops the object lock.
v2(Thomas):
- Handle managing the shrinker LRU in adjust_lru, where it is always
safe to touch the object.
v3(Thomas):
- Pretty much a re-write. This time piggy back off the shrink_pin
stuff, which actually seems to fit quite well for what we want here.
v4(Thomas):
- Just use a simple boolean for tracking ttm_shrinkable.
v5:
- Ensure we call adjust_lru when faulting the object, to ensure the
pages are visible to the shrinker, if needed.
- Add back the adjust_lru when in i915_ttm_move (Thomas)
v6(Reported-by: kernel test robot <lkp@intel.com>):
- Remove unused i915_tt
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> #v4
Link: https://patchwork.freedesktop.org/patch/msgid/20211018091055.1998191-6-matthew.auld@intel.com
2021-10-18 10:10:53 +01:00
/*
* We freely manage the shrinker LRU outide of the mm . pages life
* cycle . As a result when destroying the object we should be
* extra paranoid and ensure we remove it from the LRU , before
* we free the object .
*
* Touching the ttm_shrinkable outside of the object lock here
* should be safe now that the last GEM object ref was dropped .
*/
if ( obj - > mm . ttm_shrinkable )
i915_gem_object_make_unshrinkable ( obj ) ;
2021-09-30 13:32:36 +02:00
i915_ttm_backup_free ( obj ) ;
/* This releases all gem object bindings to the backend. */
__i915_gem_free_object ( obj ) ;
2021-06-10 09:01:49 +02:00
call_rcu ( & obj - > rcu , __i915_gem_free_object_rcu ) ;
2021-09-30 13:32:36 +02:00
} else {
__i915_gem_object_fini ( obj ) ;
}
2021-06-10 09:01:49 +02:00
}
/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @page_size: The requested (minimum) page size in bytes, or 0 for the
 * region default. Forcing a page size is kernel internal only.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
			       struct drm_i915_gem_object *obj,
			       resource_size_t size,
			       resource_size_t page_size,
			       unsigned int flags)
{
	static struct lock_class_key lock_class;
	struct drm_i915_private *i915 = mem->i915;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	enum ttm_bo_type bo_type;
	int ret;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);

	/* Don't put on a region list until we're either locked or fully initialized. */
	obj->mm.region = mem;
	INIT_LIST_HEAD(&obj->mm.region_link);

	INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->ttm.get_io_page.lock);

	/* Only user objects get a mmappable (ttm_bo_type_device) bo. */
	bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
		ttm_bo_type_kernel;

	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

	/* Forcing the page size is kernel internal only */
	GEM_BUG_ON(page_size && obj->mm.n_placements);

	/*
	 * Keep an extra shrink pin to prevent the object from being made
	 * shrinkable too early. If the ttm_tt is ever allocated in shmem, we
	 * drop the pin. The TTM backend manages the shrinker LRU itself,
	 * outside of the normal mm.pages life cycle.
	 */
	i915_gem_object_make_unshrinkable(obj);

	/*
	 * If this function fails, it will call the destructor, but
	 * our caller still owns the object. So no freeing in the
	 * destructor until obj->ttm.created is true.
	 * Similarly, in delayed_destroy, we can't call ttm_bo_put()
	 * until successful initialization.
	 */
	ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
				   bo_type, &i915_sys_placement,
				   page_size >> PAGE_SHIFT,
				   &ctx, NULL, NULL, i915_ttm_bo_destroy);
	if (ret)
		return i915_ttm_err_to_gem(ret);

	obj->ttm.created = true;

	/* Now safe to put the object on the real region list. */
	i915_gem_object_release_memory_region(obj);
	i915_gem_object_init_memory_region(obj, mem);
	i915_ttm_adjust_domains_after_move(obj);
	i915_ttm_adjust_gem_after_move(obj);
	i915_gem_object_unlock(obj);

	return 0;
}
2021-06-24 10:42:40 +02:00
static const struct intel_memory_region_ops ttm_system_region_ops = {
. init_object = __i915_gem_ttm_object_init ,
2021-11-22 22:45:51 +01:00
. release = intel_region_ttm_fini ,
2021-06-24 10:42:40 +02:00
} ;
/*
 * Create the TTM-backed system memory region, sized to total RAM.
 * Returns the region or an ERR_PTR() on failure.
 */
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
			  u16 type, u16 instance)
{
	struct intel_memory_region *mr;

	mr = intel_memory_region_create(i915, 0,
					totalram_pages() << PAGE_SHIFT,
					PAGE_SIZE, 0, 0,
					type, instance,
					&ttm_system_region_ops);
	if (IS_ERR(mr))
		return mr;

	intel_memory_region_set_name(mr, "system-ttm");

	return mr;
}