// SPDX-License-Identifier: MIT
/*
* Copyright © 2019 Intel Corporation
*/

#include "intel_memory_region.h"
#include "i915_drv.h"

/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. */
#define REGION_MAP(type, inst) \
	BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst)

static const u32 intel_region_map[] = {
	[INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0),
	[INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0),
	[INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0),
};
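
/*
 * Each intel_region_map[] entry packs the memory type above
 * INTEL_MEMORY_TYPE_SHIFT and the instance in the low bits; e.g.
 * REGION_MAP(INTEL_MEMORY_LOCAL, 0) expands to
 * BIT(INTEL_MEMORY_LOCAL + INTEL_MEMORY_TYPE_SHIFT) | BIT(0).
 * MEMORY_TYPE_FROM_REGION() and MEMORY_INSTANCE_FROM_REGION() in
 * intel_memory_regions_hw_probe() below unpack the two fields again.
 */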

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

static u64
intel_memory_region_free_pages(struct intel_memory_region *mem,
			       struct list_head *blocks)
{
	struct i915_buddy_block *block, *on;
	u64 size = 0;

	list_for_each_entry_safe(block, on, blocks, link) {
		size += i915_buddy_block_size(&mem->mm, block);
		i915_buddy_free(&mem->mm, block);
	}
	INIT_LIST_HEAD(blocks);

	return size;
}

void
__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
				      struct list_head *blocks)
{
	mutex_lock(&mem->mm_lock);
	mem->avail += intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
}

void
__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
{
	struct list_head blocks;

	INIT_LIST_HEAD(&blocks);
	list_add(&block->link, &blocks);
	__intel_memory_region_put_pages_buddy(block->private, &blocks);
}

int
__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags,
				      struct list_head *blocks)
{
	unsigned int min_order = 0;
	unsigned long n_pages;

	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
	GEM_BUG_ON(!list_empty(blocks));

	if (flags & I915_ALLOC_MIN_PAGE_SIZE) {
		min_order = ilog2(mem->min_page_size) -
			    ilog2(mem->mm.chunk_size);
	}

	if (flags & I915_ALLOC_CONTIGUOUS) {
		size = roundup_pow_of_two(size);
		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
	}

	if (size > mem->mm.size)
		return -E2BIG;

	n_pages = size >> ilog2(mem->mm.chunk_size);

	mutex_lock(&mem->mm_lock);

	do {
		struct i915_buddy_block *block;
		unsigned int order;

		order = fls(n_pages) - 1;
		GEM_BUG_ON(order > mem->mm.max_order);
		GEM_BUG_ON(order < min_order);

		do {
			block = i915_buddy_alloc(&mem->mm, order);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order)
				goto err_free_blocks;
		} while (1);

		n_pages -= BIT(order);

		block->private = mem;
		list_add_tail(&block->link, blocks);

		if (!n_pages)
			break;
	} while (1);

	mem->avail -= size;

	mutex_unlock(&mem->mm_lock);
	return 0;

err_free_blocks:
	intel_memory_region_free_pages(mem, blocks);
	mutex_unlock(&mem->mm_lock);
	return -ENXIO;
}
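
/*
 * Illustrative sketch only (not a caller in this file): a backend needing
 * physical space from a region would typically pair the two helpers above
 * along these lines, assuming a suitably sized example allocation:
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = __intel_memory_region_get_pages_buddy(mem, SZ_64K,
 *						    I915_ALLOC_MIN_PAGE_SIZE,
 *						    &blocks);
 *	if (err)
 *		return err;
 *	...
 *	__intel_memory_region_put_pages_buddy(mem, &blocks);
 *
 * The requested size must already be aligned to mem->mm.chunk_size and the
 * list must start out empty, as the GEM_BUG_ONs above assert.
 */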

struct i915_buddy_block *
__intel_memory_region_get_block_buddy(struct intel_memory_region *mem,
				      resource_size_t size,
				      unsigned int flags)
{
	struct i915_buddy_block *block;
	LIST_HEAD(blocks);
	int ret;

	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks);
	if (ret)
		return ERR_PTR(ret);

	block = list_first_entry(&blocks, typeof(*block), link);
	list_del_init(&block->link);
	return block;
}

int intel_memory_region_init_buddy(struct intel_memory_region *mem)
{
	return i915_buddy_init(&mem->mm, resource_size(&mem->region),
			       PAGE_SIZE);
}

void intel_memory_region_release_buddy(struct intel_memory_region *mem)
{
	i915_buddy_fini(&mem->mm);
}

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);
	INIT_LIST_HEAD(&mem->objects.purgeable);

	mutex_init(&mem->mm_lock);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
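
/*
 * Minimal sketch of registering a region, assuming a hypothetical backend
 * providing "my_region_ops"/"my_region_init"/"my_region_release" (only the
 * .init and .release hooks are exercised by this file; real backends such
 * as i915_gem_shmem_setup() define more):
 *
 *	static const struct intel_memory_region_ops my_region_ops = {
 *		.init = my_region_init,
 *		.release = my_region_release,
 *	};
 *
 *	mem = intel_memory_region_create(i915, 0, SZ_256M, PAGE_SIZE, 0,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 */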

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->mm_lock);
	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u32 type;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]);
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			mem = i915_gem_shmem_setup(i915);
			break;
		case INTEL_MEMORY_STOLEN:
			mem = i915_gem_stolen_setup(i915);
			break;
		case INTEL_MEMORY_LOCAL:
			mem = intel_setup_fake_lmem(i915);
			break;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = intel_region_map[i];
		mem->type = type;
		mem->instance =
			MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]);

		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif