// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

#include "i915_drv.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"

#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/i915_gem_ttm_pm.h"

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"

#define I915_TTM_PRIO_PURGE     0
#define I915_TTM_PRIO_NO_PAGES  1
#define I915_TTM_PRIO_HAS_PAGES 2

/*
 * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
 */
#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN

/**
 * struct i915_ttm_tt - TTM page vector with additional private information
 * @ttm: The base TTM page vector.
 * @dev: The struct device used for dma mapping and unmapping.
 * @cached_st: The cached scatter-gather table.
 * @is_shmem: Set if using shmem.
 * @filp: The shmem file, if using the shmem backend.
 *
 * Note that DMA may be going on right up to the point where the page-
 * vector is unpopulated in delayed destroy. Hence keep the
 * scatter-gather table mapped and cached up to that point. This is
 * different from the cached gem object io scatter-gather table which
 * doesn't have an associated dma mapping.
 */
struct i915_ttm_tt {
        struct ttm_tt ttm;
        struct device *dev;
        struct sg_table *cached_st;

        bool is_shmem;
        struct file *filp;
};

2021-06-16 16:24:57 +01:00
static const struct ttm_place sys_placement_flags = {
. fpfn = 0 ,
. lpfn = 0 ,
. mem_type = I915_PL_SYSTEM ,
. flags = 0 ,
2021-06-10 09:01:49 +02:00
} ;
static struct ttm_placement i915_sys_placement = {
. num_placement = 1 ,
2021-06-16 16:24:57 +01:00
. placement = & sys_placement_flags ,
2021-06-10 09:01:49 +02:00
. num_busy_placement = 1 ,
2021-06-16 16:24:57 +01:00
. busy_placement = & sys_placement_flags ,
2021-06-10 09:01:49 +02:00
} ;
/**
 * i915_ttm_sys_placement - Return the struct ttm_placement to be
 * used for an object in system memory.
 *
 * Rather than making the struct extern, use this
 * function.
 *
 * Return: A pointer to a static variable for sys placement.
 */
struct ttm_placement *i915_ttm_sys_placement(void)
{
        return &i915_sys_placement;
}

static int i915_ttm_err_to_gem(int err)
{
        /* Fastpath */
        if (likely(!err))
                return 0;

        switch (err) {
        case -EBUSY:
                /*
                 * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
                 * restart the operation, since we don't record the contending
                 * lock. We use -EAGAIN to restart.
                 */
                return -EAGAIN;
        case -ENOSPC:
                /*
                 * Memory type / region is full, and we can't evict.
                 * Except possibly system, that returns -ENOMEM;
                 */
                return -ENXIO;
        default:
                break;
        }

        return err;
}

static bool gpu_binds_iomem(struct ttm_resource *mem)
{
        return mem->mem_type != TTM_PL_SYSTEM;
}

static bool cpu_maps_iomem(struct ttm_resource *mem)
{
        /* Once / if we support GGTT, this is also false for cached ttm_tts */
        return mem->mem_type != TTM_PL_SYSTEM;
}

static enum i915_cache_level
i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
                     struct ttm_tt *ttm)
{
        return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
                ttm->caching == ttm_cached) ? I915_CACHE_LLC :
                I915_CACHE_NONE;
}

2021-06-10 09:01:49 +02:00
static void i915_ttm_adjust_lru ( struct drm_i915_gem_object * obj ) ;
2021-06-16 16:24:57 +01:00
static enum ttm_caching
i915_ttm_select_tt_caching ( const struct drm_i915_gem_object * obj )
{
/*
* Objects only allowed in system get cached cpu - mappings .
* Other objects get WC mapping for now . Even if in system .
*/
if ( obj - > mm . region - > type = = INTEL_MEMORY_SYSTEM & &
obj - > mm . n_placements < = 1 )
return ttm_cached ;
return ttm_write_combined ;
}
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
                           struct ttm_place *place,
                           unsigned int flags)
{
        memset(place, 0, sizeof(*place));
        place->mem_type = intel_region_to_ttm_type(mr);

        if (flags & I915_BO_ALLOC_CONTIGUOUS)
                place->flags = TTM_PL_FLAG_CONTIGUOUS;
}

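/*
 * Build the TTM placement for a GEM object: the first allowed region is used
 * as the requested placement, while all allowed regions are listed as busy
 * (fallback) placements that may be used under memory pressure.
 */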
static void
i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
                            struct ttm_place *requested,
                            struct ttm_place *busy,
                            struct ttm_placement *placement)
{
        unsigned int num_allowed = obj->mm.n_placements;
        unsigned int flags = obj->flags;
        unsigned int i;

        placement->num_placement = 1;
        i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
                                   obj->mm.region, requested, flags);

        /* Cache this on object? */
        placement->num_busy_placement = num_allowed;
        for (i = 0; i < placement->num_busy_placement; ++i)
                i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);

        if (num_allowed == 0) {
                *busy = *requested;
                placement->num_busy_placement = 1;
        }

        placement->placement = requested;
        placement->busy_placement = busy;
}

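/*
 * Back the page vector with shmem so our shrinker can swap the pages out:
 * lazily create the shmem file, allocate and dma-map a scatter-gather table
 * from its mapping and mirror the pages into ttm->pages.
 */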
static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
                                      struct ttm_tt *ttm,
                                      struct ttm_operation_ctx *ctx)
{
        struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
        struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        const unsigned int max_segment = i915_sg_segment_size();
        const size_t size = ttm->num_pages << PAGE_SHIFT;
        struct file *filp = i915_tt->filp;
        struct sgt_iter sgt_iter;
        struct sg_table *st;
        struct page *page;
        unsigned long i;
        int err;

        if (!filp) {
                struct address_space *mapping;
                gfp_t mask;

                filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE);
                if (IS_ERR(filp))
                        return PTR_ERR(filp);

                mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;

                mapping = filp->f_mapping;
                mapping_set_gfp_mask(mapping, mask);
                GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

                i915_tt->filp = filp;
        }

        st = shmem_alloc_st(i915, size, mr, filp->f_mapping, max_segment);
        if (IS_ERR(st))
                return PTR_ERR(st);

        err = dma_map_sg_attrs(i915_tt->dev,
                               st->sgl, st->nents,
                               DMA_BIDIRECTIONAL,
                               DMA_ATTR_SKIP_CPU_SYNC);
        if (err <= 0) {
                err = -EINVAL;
                goto err_free_st;
        }

        i = 0;
        for_each_sgt_page(page, sgt_iter, st)
                ttm->pages[i++] = page;

        if (ttm->page_flags & TTM_TT_FLAG_SWAPPED)
                ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;

        i915_tt->cached_st = st;
        return 0;

err_free_st:
        shmem_free_st(st, filp->f_mapping, false, false);

        return err;
}

static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED;

        dma_unmap_sg(i915_tt->dev, i915_tt->cached_st->sgl,
                     i915_tt->cached_st->nents,
                     DMA_BIDIRECTIONAL);

        shmem_free_st(fetch_and_zero(&i915_tt->cached_st),
                      file_inode(i915_tt->filp)->i_mapping,
                      backup, backup);
}

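/*
 * Create the TTM page vector for a buffer object. Shrinkable objects with
 * cached CPU mappings are marked EXTERNAL and backed by shmem so that they
 * are swapped by the i915 shrinker instead of by TTM.
 */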
static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
                                         uint32_t page_flags)
{
        struct ttm_resource_manager *man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        enum ttm_caching caching = i915_ttm_select_tt_caching(obj);
        struct i915_ttm_tt *i915_tt;
        int ret;

        i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL);
        if (!i915_tt)
                return NULL;

        if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
            man->use_tt)
                page_flags |= TTM_TT_FLAG_ZERO_ALLOC;

        if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
                page_flags |= TTM_TT_FLAG_EXTERNAL |
                              TTM_TT_FLAG_EXTERNAL_MAPPABLE;
                i915_tt->is_shmem = true;
        }

        ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching);
        if (ret)
                goto err_free;

        i915_tt->dev = obj->base.dev->dev;

        return &i915_tt->ttm;

err_free:
        kfree(i915_tt);
        return NULL;
}

static int i915_ttm_tt_populate(struct ttm_device *bdev,
                                struct ttm_tt *ttm,
                                struct ttm_operation_ctx *ctx)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->is_shmem)
                return i915_ttm_tt_shmem_populate(bdev, ttm, ctx);

        return ttm_pool_alloc(&bdev->pool, ttm, ctx);
}

static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->is_shmem) {
                i915_ttm_tt_shmem_unpopulate(ttm);
        } else {
                if (i915_tt->cached_st) {
                        dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st,
                                          DMA_BIDIRECTIONAL, 0);
                        sg_free_table(i915_tt->cached_st);
                        kfree(i915_tt->cached_st);
                        i915_tt->cached_st = NULL;
                }
                ttm_pool_free(&bdev->pool, ttm);
        }
}

static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);

        if (i915_tt->filp)
                fput(i915_tt->filp);

        ttm_tt_fini(ttm);
        kfree(i915_tt);
}

static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
                                       const struct ttm_place *place)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        /*
         * EXTERNAL objects should never be swapped out by TTM, instead we need
         * to handle that ourselves. TTM will already skip such objects for us,
         * but we would like to avoid grabbing locks for no good reason.
         */
        if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                return false;

        /* Will do for now. Our pinned objects are still on TTM's LRU lists */
        return i915_gem_object_evictable(obj);
}

static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
                                 struct ttm_placement *placement)
{
        *placement = i915_sys_placement;
}

static int i915_ttm_move_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret;

        ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
        if (ret)
                return ret;

        ret = __i915_gem_object_put_pages(obj);
        if (ret)
                return ret;

        return 0;
}

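/*
 * Drop the cached io scatter-gather table along with the radix tree entries
 * used to look up io pages at fault time.
 */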
static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        if (!obj->ttm.cached_io_st)
                return;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->ttm.get_io_page.radix, &iter, 0)
                radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index);
        rcu_read_unlock();

        sg_free_table(obj->ttm.cached_io_st);
        kfree(obj->ttm.cached_io_st);
        obj->ttm.cached_io_st = NULL;
}

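/*
 * Refresh the GEM read/write domains after a move: WC for iomem or
 * non-cached ttm_tt pages, CPU otherwise.
 */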
static void
i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
                obj->write_domain = I915_GEM_DOMAIN_WC;
                obj->read_domains = I915_GEM_DOMAIN_WC;
        } else {
                obj->write_domain = I915_GEM_DOMAIN_CPU;
                obj->read_domains = I915_GEM_DOMAIN_CPU;
        }
}

static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        unsigned int cache_level;
        unsigned int i;

        /*
         * If object was moved to an allowable region, update the object
         * region to consider it migrated. Note that if it's currently not
         * in an allowable region, it's evicted and we don't update the
         * object region.
         */
        if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
                for (i = 0; i < obj->mm.n_placements; ++i) {
                        struct intel_memory_region *mr = obj->mm.placements[i];

                        if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
                            mr != obj->mm.region) {
                                i915_gem_object_release_memory_region(obj);
                                i915_gem_object_init_memory_region(obj, mr);
                                break;
                        }
                }
        }

        obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);

        obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
                I915_BO_FLAG_STRUCT_PAGE;

        cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
                                           bo->ttm);
        i915_gem_object_set_cache_coherency(obj, cache_level);
}

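/*
 * Discard the backing store: validate into an empty placement, truncate any
 * shmem file and mark the object as purged.
 */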
static int i915_ttm_purge(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct ttm_placement place = {};
        int ret;

        if (obj->mm.madv == __I915_MADV_PURGED)
                return 0;

        ret = ttm_bo_validate(bo, &place, &ctx);
        if (ret)
                return ret;

        if (bo->ttm && i915_tt->filp) {
                /*
                 * The below fput (which eventually calls shmem_truncate) might
                 * be delayed by worker, so when directly called to purge the
                 * pages (like by the shrinker) we should try to be more
                 * aggressive and release the pages immediately.
                 */
                shmem_truncate_range(file_inode(i915_tt->filp),
                                     0, (loff_t)-1);
                fput(fetch_and_zero(&i915_tt->filp));
        }

        obj->write_domain = 0;
        obj->read_domains = 0;
        i915_ttm_adjust_gem_after_move(obj);
        i915_ttm_free_cached_io_st(obj);
        obj->mm.madv = __I915_MADV_PURGED;
        return 0;
}

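/*
 * Called from the i915 shrinker: move a shmem-backed object into the
 * swapped state (optionally starting writeback) so its pages can be
 * reclaimed, or purge it entirely if it was marked DONTNEED.
 */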
static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj,
                                           bool should_writeback)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct ttm_placement place = {};
        int ret;

        if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
                return 0;

        GEM_BUG_ON(!i915_tt->is_shmem);

        if (!i915_tt->filp)
                return 0;

        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                return i915_ttm_purge(obj);
        case __I915_MADV_PURGED:
                return 0;
        }

        if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED)
                return 0;

        bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
        ret = ttm_bo_validate(bo, &place, &ctx);
        if (ret) {
                bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
                return ret;
        }

        if (should_writeback)
                __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping);

        return 0;
}

static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        int ret = i915_ttm_move_notify(bo);

        GEM_WARN_ON(ret);
        GEM_WARN_ON(obj->ttm.cached_io_st);
        if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
                i915_ttm_purge(obj);
}

static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        if (likely(obj)) {
                __i915_gem_object_pages_fini(obj);
                i915_ttm_free_cached_io_st(obj);
        }
}

static struct intel_memory_region *
i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type)
{
        struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);

        /* There's some room for optimization here... */
        GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM &&
                   ttm_mem_type < I915_PL_LMEM0);
        if (ttm_mem_type == I915_PL_SYSTEM)
                return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM,
                                                  0);

        return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL,
                                          ttm_mem_type - I915_PL_LMEM0);
}

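/*
 * Return a dma-mapped scatter-gather table for the ttm_tt pages, building
 * and caching it on first use.
 */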
static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm)
{
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
        struct sg_table *st;
        int ret;

        if (i915_tt->cached_st)
                return i915_tt->cached_st;

        st = kzalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return ERR_PTR(-ENOMEM);

        ret = sg_alloc_table_from_pages_segment(st,
                        ttm->pages, ttm->num_pages,
                        0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
                        i915_sg_segment_size(), GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ERR_PTR(ret);
        }

        ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0);
        if (ret) {
                sg_free_table(st);
                kfree(st);
                return ERR_PTR(ret);
        }

        i915_tt->cached_st = st;
        return st;
}

static struct sg_table *
i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);

        if (!gpu_binds_iomem(res))
                return i915_ttm_tt_get_st(bo->ttm);

        /*
         * If CPU mapping differs, we need to add the ttm_tt pages to
         * the resulting st. Might make sense for GGTT.
         */
        GEM_WARN_ON(!cpu_maps_iomem(res));
        return intel_region_ttm_resource_to_st(obj->mm.region, res);
}

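/*
 * Clear or copy the contents into dst_mem using the GT migration context
 * (blitter). Fails with -EINVAL if no migration context is available or the
 * GT is wedged, in which case the caller falls back to a CPU memcpy.
 */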
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
                               bool clear,
                               struct ttm_resource *dst_mem,
                               struct ttm_tt *dst_ttm,
                               struct sg_table *dst_st)
{
        struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
                                                     bdev);
        struct ttm_resource_manager *src_man =
                ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct sg_table *src_st;
        struct i915_request *rq;
        struct ttm_tt *src_ttm = bo->ttm;
        enum i915_cache_level src_level, dst_level;
        int ret;

        if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
                return -EINVAL;

        dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
        if (clear) {
                if (bo->type == ttm_bo_type_kernel)
                        return -EINVAL;

                intel_engine_pm_get(i915->gt.migrate.context->engine);
                ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
                                                  dst_st->sgl, dst_level,
                                                  gpu_binds_iomem(dst_mem),
                                                  0, &rq);

                if (!ret && rq) {
                        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
                        i915_request_put(rq);
                }
                intel_engine_pm_put(i915->gt.migrate.context->engine);
        } else {
                src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
                        obj->ttm.cached_io_st;

                src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
                intel_engine_pm_get(i915->gt.migrate.context->engine);
                ret = intel_context_migrate_copy(i915->gt.migrate.context,
                                                 NULL, src_st->sgl, src_level,
                                                 gpu_binds_iomem(bo->resource),
                                                 dst_st->sgl, dst_level,
                                                 gpu_binds_iomem(dst_mem),
                                                 &rq);
                if (!ret && rq) {
                        i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
                        i915_request_put(rq);
                }
                intel_engine_pm_put(i915->gt.migrate.context->engine);
        }

        return ret;
}

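/*
 * Perform the actual move: try the accelerated blitter path first when
 * allowed, and otherwise fall back to ttm_move_memcpy() using kmap iterators
 * for the source and destination.
 */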
static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
                            struct ttm_resource *dst_mem,
                            struct ttm_tt *dst_ttm,
                            struct sg_table *dst_st,
                            bool allow_accel)
{
        int ret = -EINVAL;

        if (allow_accel)
                ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
        if (ret) {
                struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
                struct intel_memory_region *dst_reg, *src_reg;
                union {
                        struct ttm_kmap_iter_tt tt;
                        struct ttm_kmap_iter_iomap io;
                } _dst_iter, _src_iter;
                struct ttm_kmap_iter *dst_iter, *src_iter;

                dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
                src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
                GEM_BUG_ON(!dst_reg || !src_reg);

                dst_iter = !cpu_maps_iomem(dst_mem) ?
                        ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
                        ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
                                                 dst_st, dst_reg->region.start);

                src_iter = !cpu_maps_iomem(bo->resource) ?
                        ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
                        ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
                                                 obj->ttm.cached_io_st,
                                                 src_reg->region.start);

                ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
        }
}

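/*
 * TTM move callback: waits for idle, purges instead of moving if the object
 * is no longer needed, populates the page vector if required and then clears
 * or copies the contents into the new resource.
 */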
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
                         struct ttm_operation_ctx *ctx,
                         struct ttm_resource *dst_mem,
                         struct ttm_place *hop)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
        struct ttm_tt *ttm = bo->ttm;
        struct sg_table *dst_st;
        bool clear;
        int ret;

        /* Sync for now. We could do the actual copy async. */
        ret = ttm_bo_wait_ctx(bo, ctx);
        if (ret)
                return ret;

        ret = i915_ttm_move_notify(bo);
        if (ret)
                return ret;

        if (obj->mm.madv != I915_MADV_WILLNEED) {
                i915_ttm_purge(obj);
                ttm_resource_free(bo, &dst_mem);
                return 0;
        }

        /* Populate ttm with pages if needed. Typically system memory. */
        if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
                ret = ttm_tt_populate(bo->bdev, ttm, ctx);
                if (ret)
                        return ret;
        }

        dst_st = i915_ttm_resource_get_st(obj, dst_mem);
        if (IS_ERR(dst_st))
                return PTR_ERR(dst_st);

        clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
                __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);

        ttm_bo_move_sync_cleanup(bo, dst_mem);
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_free_cached_io_st(obj);

        if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
                obj->ttm.cached_io_st = dst_st;
                obj->ttm.get_io_page.sg_pos = dst_st->sgl;
                obj->ttm.get_io_page.sg_idx = 0;
        }

        i915_ttm_adjust_gem_after_move(obj);
        return 0;
}

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
        if (!cpu_maps_iomem(mem))
                return 0;

        mem->bus.caching = ttm_write_combined;
        mem->bus.is_iomem = true;

        return 0;
}

static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
                                         unsigned long page_offset)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start;
        struct scatterlist *sg;
        unsigned int ofs;

        GEM_WARN_ON(bo->ttm);

        sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);

        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}

static struct ttm_device_funcs i915_ttm_bo_driver = {
        .ttm_tt_create = i915_ttm_tt_create,
        .ttm_tt_populate = i915_ttm_tt_populate,
        .ttm_tt_unpopulate = i915_ttm_tt_unpopulate,
        .ttm_tt_destroy = i915_ttm_tt_destroy,
        .eviction_valuable = i915_ttm_eviction_valuable,
        .evict_flags = i915_ttm_evict_flags,
        .move = i915_ttm_move,
        .swap_notify = i915_ttm_swap_notify,
        .delete_mem_notify = i915_ttm_delete_mem_notify,
        .io_mem_reserve = i915_ttm_io_mem_reserve,
        .io_mem_pfn = i915_ttm_io_mem_pfn,
};

/**
 * i915_ttm_driver - Return a pointer to the TTM device funcs
 *
 * Return: Pointer to statically allocated TTM device funcs.
 */
struct ttm_device_funcs *i915_ttm_driver(void)
{
        return &i915_ttm_bo_driver;
}

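/*
 * Validate the object into one of the given placements, first trying the
 * requested placement without eviction, then populate the ttm_tt if needed
 * and install the resulting page vector on the GEM object.
 */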
static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
                                struct ttm_placement *placement)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        struct sg_table *st;
        int real_num_busy;
        int ret;

        /* First try only the requested placement. No eviction. */
        real_num_busy = fetch_and_zero(&placement->num_busy_placement);
        ret = ttm_bo_validate(bo, placement, &ctx);
        if (ret) {
                ret = i915_ttm_err_to_gem(ret);
                /*
                 * Anything that wants to restart the operation gets to
                 * do that.
                 */
                if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
                    ret == -EAGAIN)
                        return ret;

                /*
                 * If the initial attempt fails, allow all accepted placements,
                 * evicting if necessary.
                 */
                placement->num_busy_placement = real_num_busy;
                ret = ttm_bo_validate(bo, placement, &ctx);
                if (ret)
                        return i915_ttm_err_to_gem(ret);
        }

        i915_ttm_adjust_lru(obj);
        if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
                ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
                if (ret)
                        return ret;

                i915_ttm_adjust_domains_after_move(obj);
                i915_ttm_adjust_gem_after_move(obj);
        }

        if (!i915_gem_object_has_pages(obj)) {
                struct i915_ttm_tt *i915_tt =
                        container_of(bo->ttm, typeof(*i915_tt), ttm);

                /* Object either has a page vector or is an iomem object */
                st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
                if (IS_ERR(st))
                        return PTR_ERR(st);

                __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
                if (!bo->ttm || !i915_tt->is_shmem)
                        i915_gem_object_make_unshrinkable(obj);
        }

        return ret;
}

static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
{
        struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
        struct ttm_placement placement;

        GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);

        /* Move to the requested placement. */
        i915_ttm_placement_from_obj(obj, &requested, busy, &placement);

        return __i915_ttm_get_pages(obj, &placement);
}

/**
 * DOC: Migration vs eviction
 *
 * GEM migration may not be the same as TTM migration / eviction. If
 * the TTM core decides to evict an object it may be evicted to a
 * TTM memory type that is not in the object's allowable GEM regions, or
 * in fact theoretically to a TTM memory type that doesn't correspond to
 * a GEM memory region. In that case the object's GEM region is not
 * updated, and the data is migrated back to the GEM region at
 * get_pages time. TTM may however set up CPU ptes to the object even
 * when it is evicted.
 * Gem forced migration using the i915_ttm_migrate() op, is allowed even
 * to regions that are not in the object's list of allowable placements.
 */
static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
                            struct intel_memory_region *mr)
{
        struct ttm_place requested;
        struct ttm_placement placement;
        int ret;

        i915_ttm_place_from_region(mr, &requested, obj->flags);
        placement.num_placement = 1;
        placement.num_busy_placement = 1;
        placement.placement = &requested;
        placement.busy_placement = &requested;

        ret = __i915_ttm_get_pages(obj, &placement);
        if (ret)
                return ret;

        /*
         * Reinitialize the region bindings. This is primarily
         * required for objects where the new region is not in
         * its allowable placements.
         */
        if (obj->mm.region != mr) {
                i915_gem_object_release_memory_region(obj);
                i915_gem_object_init_memory_region(obj, mr);
        }

        return 0;
}

static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *st)
{
        /*
         * We're currently not called from a shrinker, so put_pages()
         * typically means the object is about to be destroyed, or called
         * from move_notify(). So just avoid doing much for now.
         * If the object is not destroyed next, the TTM eviction logic
         * and shrinkers will move it out if needed.
         */

        i915_ttm_adjust_lru(obj);
}

static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
{
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct i915_ttm_tt *i915_tt =
                container_of(bo->ttm, typeof(*i915_tt), ttm);

        /*
         * Don't manipulate the TTM LRUs while in TTM bo destruction.
         * We're called through i915_ttm_delete_mem_notify().
         */
        if (!kref_read(&bo->kref))
                return;

        /*
         * Put on the correct LRU list depending on the MADV status
         */
        spin_lock(&bo->bdev->lru_lock);
        if (bo->ttm && i915_tt->filp) {
                /* Try to keep shmem_tt from being considered for shrinking. */
                bo->priority = TTM_MAX_BO_PRIORITY - 1;
        } else if (obj->mm.madv != I915_MADV_WILLNEED) {
                bo->priority = I915_TTM_PRIO_PURGE;
        } else if (!i915_gem_object_has_pages(obj)) {
                if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
                        bo->priority = I915_TTM_PRIO_HAS_PAGES;
        } else {
                if (bo->priority > I915_TTM_PRIO_NO_PAGES)
                        bo->priority = I915_TTM_PRIO_NO_PAGES;
        }

        ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
        spin_unlock(&bo->bdev->lru_lock);
}

/*
 * TTM-backed gem object destruction requires some clarification.
 * Basically we have two possibilities here. We can either rely on the
 * i915 delayed destruction and put the TTM object when the object
 * is idle. This would be detected by TTM which would bypass the
 * TTM delayed destroy handling. The other approach is to put the TTM
 * object early and rely on the TTM destroyed handling, and then free
 * the leftover parts of the GEM object once TTM's destroyed list handling is
 * complete. For now, we rely on the latter for two reasons:
 * a) TTM can evict an object even when it's on the delayed destroy list,
 * which in theory allows for complete eviction.
 * b) There is work going on in TTM to allow freeing an object even when
 * it's not idle, and using the TTM destroyed list handling could help us
 * benefit from that.
 */
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->ttm.created);

        ttm_bo_put(i915_gem_to_ttm(obj));
}

static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
{
        struct vm_area_struct *area = vmf->vma;
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(area->vm_private_data);

        /* Sanity check that we allow writing into this object */
        if (unlikely(i915_gem_object_is_readonly(obj) &&
                     area->vm_flags & VM_WRITE))
                return VM_FAULT_SIGBUS;

        return ttm_bo_vm_fault(vmf);
}

static int
vm_access_ttm(struct vm_area_struct *area, unsigned long addr,
              void *buf, int len, int write)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(area->vm_private_data);

        if (i915_gem_object_is_readonly(obj) && write)
                return -EACCES;

        return ttm_bo_vm_access(area, addr, buf, len, write);
}

static void ttm_vm_open(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_get(obj);
}

static void ttm_vm_close(struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj =
                i915_ttm_to_gem(vma->vm_private_data);

        GEM_BUG_ON(!obj);
        i915_gem_object_put(obj);
}

static const struct vm_operations_struct vm_ops_ttm = {
        .fault = vm_fault_ttm,
        .access = vm_access_ttm,
        .open = ttm_vm_open,
        .close = ttm_vm_close,
};

static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
{
        /* The ttm_bo must be allocated with I915_BO_ALLOC_USER */
        GEM_BUG_ON(!drm_mm_node_allocated(&obj->base.vma_node.vm_node));

        return drm_vma_node_offset_addr(&obj->base.vma_node);
}

static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .name = "i915_gem_object_ttm",

        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
        .truncate = i915_ttm_purge,
        .shrinker_release_pages = i915_ttm_shrinker_release_pages,

        .adjust_lru = i915_ttm_adjust_lru,
        .delayed_free = i915_ttm_delayed_free,
        .migrate = i915_ttm_migrate,

        .mmap_offset = i915_ttm_mmap_offset,
        .mmap_ops = &vm_ops_ttm,
};

void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);

        i915_gem_object_release_memory_region(obj);
        mutex_destroy(&obj->ttm.get_io_page.lock);

        if (obj->ttm.created) {
                i915_ttm_backup_free(obj);

                /* This releases all gem object bindings to the backend. */
                __i915_gem_free_object(obj);

                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
        } else {
                __i915_gem_object_fini(obj);
        }
}

/**
 * __i915_gem_ttm_object_init - Initialize a ttm-backed i915 gem object
 * @mem: The initial memory region for the object.
 * @obj: The gem object.
 * @size: Object size in bytes.
 * @page_size: Forced page size for the object's backing store, or 0 for the
 * region default.
 * @flags: gem object flags.
 *
 * Return: 0 on success, negative error code on failure.
 */
int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
                               resource_size_t page_size,
                               unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
        };
        enum ttm_bo_type bo_type;
        int ret;

        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);

        /* Don't put on a region list until we're either locked or fully initialized. */
        obj->mm.region = intel_memory_region_get(mem);
        INIT_LIST_HEAD(&obj->mm.region_link);

        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
        bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
                ttm_bo_type_kernel;

        obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);

        /* Forcing the page size is kernel internal only */
        GEM_BUG_ON(page_size && obj->mm.n_placements);

        /*
         * If this function fails, it will call the destructor, but
         * our caller still owns the object. So no freeing in the
         * destructor until obj->ttm.created is true.
         * Similarly, in delayed_destroy, we can't call ttm_bo_put()
         * until successful initialization.
         */
        ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
                                   bo_type, &i915_sys_placement,
                                   page_size >> PAGE_SHIFT,
                                   &ctx, NULL, NULL, i915_ttm_bo_destroy);
        if (ret)
                return i915_ttm_err_to_gem(ret);

        obj->ttm.created = true;
        i915_gem_object_release_memory_region(obj);
        i915_gem_object_init_memory_region(obj, mem);
        i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_adjust_gem_after_move(obj);
        i915_gem_object_unlock(obj);

        return 0;
}

static const struct intel_memory_region_ops ttm_system_region_ops = {
        .init_object = __i915_gem_ttm_object_init,
};

struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
                          u16 type, u16 instance)
{
        struct intel_memory_region *mr;

        mr = intel_memory_region_create(i915, 0,
                                        totalram_pages() << PAGE_SHIFT,
                                        PAGE_SIZE, 0,
                                        type, instance,
                                        &ttm_system_region_ops);
        if (IS_ERR(mr))
                return mr;

        intel_memory_region_set_name(mr, "system-ttm");
        return mr;
}

/**
 * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
 * another
 * @dst: The destination object
 * @src: The source object
 * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
 * @intr: Whether to perform the waits interruptibly.
 *
 * Note: The caller is responsible for assuring that the underlying
 * TTM objects are populated if needed and locked.
 *
 * Return: Zero on success. Negative error code on error. If @intr == true,
 * then it may return -ERESTARTSYS or -EINTR.
 */
int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
                          struct drm_i915_gem_object *src,
                          bool allow_accel, bool intr)
{
        struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
        struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
        struct ttm_operation_ctx ctx = {
                .interruptible = intr,
        };
        struct sg_table *dst_st;
        int ret;

        assert_object_held(dst);
        assert_object_held(src);

        /*
         * Sync for now. This will change with async moves.
         */
        ret = ttm_bo_wait_ctx(dst_bo, &ctx);
        if (!ret)
                ret = ttm_bo_wait_ctx(src_bo, &ctx);
        if (ret)
                return ret;

        dst_st = gpu_binds_iomem(dst_bo->resource) ?
                dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);

        __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
                        dst_st, allow_accel);
        return 0;
}