// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/slab.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_buddy.h>

#include "i915_ttm_buddy_manager.h"

#include "i915_gem.h"

struct i915_ttm_buddy_manager {
	struct ttm_resource_manager manager;
	struct drm_buddy mm;
	struct list_head reserved;
	struct mutex lock;
	/* CPU-visible (mappable) portion, all tracked in pages */
	unsigned long visible_size;
	unsigned long visible_avail;
	unsigned long visible_reserved;
	u64 default_page_size;
};

static struct i915_ttm_buddy_manager *
to_buddy_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct i915_ttm_buddy_manager, manager);
}

static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
				    struct ttm_buffer_object *bo,
				    const struct ttm_place *place,
				    struct ttm_resource **res)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct i915_ttm_buddy_resource *bman_res;
	struct drm_buddy *mm = &bman->mm;
	unsigned long n_pages, lpfn;
	u64 min_page_size;
	u64 size;
	int err;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
	if (!bman_res)
		return -ENOMEM;

	ttm_resource_init(bo, place, &bman_res->base);
	INIT_LIST_HEAD(&bman_res->blocks);
	bman_res->mm = mm;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	if (place->fpfn || lpfn != man->size)
		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	GEM_BUG_ON(!bman_res->base.num_pages);
	size = bman_res->base.num_pages << PAGE_SHIFT;

	min_page_size = bman->default_page_size;
	if (bo->page_alignment)
		min_page_size = bo->page_alignment << PAGE_SHIFT;

	GEM_BUG_ON(min_page_size < mm->chunk_size);

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		unsigned long pages;

		/*
		 * Rounding the size up to a power-of-two and forcing
		 * min_page_size to match means the request is satisfied by a
		 * single buddy block; any excess is trimmed off below.
		 */
		size = roundup_pow_of_two(size);
		min_page_size = size;

		pages = size >> ilog2(mm->chunk_size);
		if (pages > lpfn)
			lpfn = pages;
	}

	if (size > lpfn << PAGE_SHIFT) {
		err = -E2BIG;
		goto err_free_res;
	}

	n_pages = size >> ilog2(mm->chunk_size);

	mutex_lock(&bman->lock);

	/*
	 * Bail early if the request must land entirely within the CPU-visible
	 * window but not enough visible pages remain.
	 */
	if (lpfn <= bman->visible_size && n_pages > bman->visible_avail) {
		mutex_unlock(&bman->lock);
		err = -ENOSPC;
		goto err_free_res;
	}

	err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
				     (u64)lpfn << PAGE_SHIFT,
				     (u64)n_pages << PAGE_SHIFT,
				     min_page_size,
				     &bman_res->blocks,
				     bman_res->flags);
	mutex_unlock(&bman->lock);
	if (unlikely(err))
		goto err_free_blocks;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT;

		/* Trim the power-of-two rounded block back to the requested size */
		mutex_lock(&bman->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     &bman_res->blocks);
		mutex_unlock(&bman->lock);
	}

	/* Track how many of the allocated pages fall within the CPU-visible portion */
	if (lpfn <= bman->visible_size) {
		bman_res->used_visible_size = bman_res->base.num_pages;
	} else {
		struct drm_buddy_block *block;

		list_for_each_entry(block, &bman_res->blocks, link) {
			unsigned long start =
				drm_buddy_block_offset(block) >> PAGE_SHIFT;

			if (start < bman->visible_size) {
				unsigned long end = start +
					(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);

				bman_res->used_visible_size +=
					min(end, bman->visible_size) - start;
			}
		}
	}

	if (bman_res->used_visible_size) {
		mutex_lock(&bman->lock);
		bman->visible_avail -= bman_res->used_visible_size;
		mutex_unlock(&bman->lock);
	}

	if (place->lpfn - place->fpfn == n_pages)
		bman_res->base.start = place->fpfn;
	else if (lpfn <= bman->visible_size)
		bman_res->base.start = 0;
	else
		bman_res->base.start = bman->visible_size;

	*res = &bman_res->base;
	return 0;

err_free_blocks:
	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman_res->blocks);
	mutex_unlock(&bman->lock);
err_free_res:
	ttm_resource_fini(man, &bman_res->base);
	kfree(bman_res);
	return err;
}

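/*
 * Worked example for the visible-size accounting in
 * i915_ttm_buddy_man_alloc() above (the numbers are illustrative, not taken
 * from real hardware): with visible_size = 256 pages, a resource that is not
 * confined to the visible window but happens to contain a block covering
 * pages [192, 320) contributes min(320, 256) - 192 = 64 pages to
 * used_visible_size, i.e. only the slice of the block that the CPU can
 * actually map.
 */
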
static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
				    struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(&bman->mm, &bman_res->blocks);
	bman->visible_avail += bman_res->used_visible_size;
	mutex_unlock(&bman->lock);

	ttm_resource_fini(man, res);
	kfree(bman_res);
}

static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
				     struct drm_printer *printer)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy_block *block;

	mutex_lock(&bman->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   bman->default_page_size >> 10);
	drm_printf(printer, "visible_avail: %lluMiB\n",
		   (u64)bman->visible_avail << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_size: %lluMiB\n",
		   (u64)bman->visible_size << PAGE_SHIFT >> 20);
	drm_printf(printer, "visible_reserved: %lluMiB\n",
		   (u64)bman->visible_reserved << PAGE_SHIFT >> 20);

	drm_buddy_print(&bman->mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &bman->reserved, link)
		drm_buddy_block_print(&bman->mm, block, printer);
	mutex_unlock(&bman->lock);
}

static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
	.alloc = i915_ttm_buddy_man_alloc,
	.free = i915_ttm_buddy_man_free,
	.debug = i915_ttm_buddy_man_debug,
};

/**
 * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 * @use_tt: Set use_tt for the manager
 * @size: The size in bytes to manage
 * @visible_size: The CPU visible size in bytes to manage
 * @default_page_size: The default minimum page size in bytes for allocations.
 * This must be at least as large as @chunk_size, and can be overridden by
 * setting the BO page_alignment, to be larger or smaller as needed.
 * @chunk_size: The minimum page size in bytes for our allocations, i.e. the
 * order-zero block size
 *
 * Note that the starting address is assumed to be zero here, since this
 * simplifies keeping the property that allocated blocks have natural
 * power-of-two alignment. So long as the real starting address is some large
 * power-of-two, or naturally starts from zero, this should be fine. Also, the
 * &i915_ttm_buddy_man_reserve interface can be used to preserve alignment if,
 * say, there is some unusable range at the start of the region. We can
 * revisit this in the future and make the interface accept an actual starting
 * offset and let it take care of the rest.
 *
 * Note that if @size is not aligned to @chunk_size then we perform the
 * required rounding to get the usable size. The final size in pages can be
 * taken from &ttm_resource_manager.size.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_init(struct ttm_device *bdev,
			    unsigned int type, bool use_tt,
			    u64 size, u64 visible_size, u64 default_page_size,
			    u64 chunk_size)
{
	struct ttm_resource_manager *man;
	struct i915_ttm_buddy_manager *bman;
	int err;

	bman = kzalloc(sizeof(*bman), GFP_KERNEL);
	if (!bman)
		return -ENOMEM;

	err = drm_buddy_init(&bman->mm, size, chunk_size);
	if (err)
		goto err_free_bman;

	mutex_init(&bman->lock);
	INIT_LIST_HEAD(&bman->reserved);
	GEM_BUG_ON(default_page_size < chunk_size);
	bman->default_page_size = default_page_size;
	bman->visible_size = visible_size >> PAGE_SHIFT;
	bman->visible_avail = bman->visible_size;

	man = &bman->manager;
	man->use_tt = use_tt;
	man->func = &i915_ttm_buddy_manager_func;
	ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);

	ttm_resource_manager_set_used(man, true);
	ttm_set_driver_manager(bdev, type, man);

	return 0;

err_free_bman:
	kfree(bman);
	return err;
}

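/*
 * Usage sketch (illustrative only; the placement type and sizes below are
 * assumptions, not taken from this file): a caller managing a 4 GiB region of
 * which only the first 256 MiB is CPU visible might do:
 *
 *	err = i915_ttm_buddy_man_init(bdev, TTM_PL_VRAM, false,
 *				      SZ_4G, SZ_256M,
 *				      PAGE_SIZE, PAGE_SIZE);
 *	if (err)
 *		return err;
 *
 * The manager then tracks how much of the first 256 MiB is still free, and
 * allocations that must be CPU mappable fail with -ENOSPC once that runs out.
 */
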
/**
 * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
 * @bdev: The ttm device
 * @type: Memory type we want to manage
 *
 * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this
 * will also be freed for us here.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_evict_all(bdev, man);
	if (ret)
		return ret;

	ttm_set_driver_manager(bdev, type, NULL);

	mutex_lock(&bman->lock);
	drm_buddy_free_list(mm, &bman->reserved);
	drm_buddy_fini(mm);
	bman->visible_avail += bman->visible_reserved;
	WARN_ON_ONCE(bman->visible_avail != bman->visible_size);
	mutex_unlock(&bman->lock);

	ttm_resource_manager_cleanup(man);
	kfree(bman);

	return 0;
}

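/*
 * Teardown sketch (illustrative; TTM_PL_VRAM is an assumed placement type):
 * the matching cleanup for the init example above is simply
 *
 *	err = i915_ttm_buddy_man_fini(bdev, TTM_PL_VRAM);
 *
 * which evicts any remaining resources, releases everything carved out via
 * i915_ttm_buddy_man_reserve() and tears the buddy allocator down.
 */
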
/**
 * i915_ttm_buddy_man_reserve - Reserve address range
 * @man: The buddy allocator ttm manager
 * @start: The offset in bytes, where the region start is assumed to be zero
 * @size: The size in bytes
 *
 * Note that the starting address for the region is always assumed to be zero.
 *
 * Return: 0 on success, negative error code on failure.
 */
int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
			       u64 start, u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
	struct drm_buddy *mm = &bman->mm;
	unsigned long fpfn = start >> PAGE_SHIFT;
	unsigned long flags = 0;
	int ret;

	flags |= DRM_BUDDY_RANGE_ALLOCATION;

	mutex_lock(&bman->lock);
	ret = drm_buddy_alloc_blocks(mm, start,
				     start + size,
				     size, mm->chunk_size,
				     &bman->reserved,
				     flags);

	if (fpfn < bman->visible_size) {
		unsigned long lpfn = fpfn + (size >> PAGE_SHIFT);
		unsigned long visible = min(lpfn, bman->visible_size) - fpfn;

		bman->visible_reserved += visible;
		bman->visible_avail -= visible;
	}
	mutex_unlock(&bman->lock);

	return ret;
}

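/*
 * Usage sketch (illustrative; the 8 MiB figure is an assumption): if the
 * start of the region is unusable, e.g. stolen by firmware, it can be carved
 * out right after initialising the manager:
 *
 *	err = i915_ttm_buddy_man_reserve(man, 0, SZ_8M);
 *
 * Since this range starts below visible_size, the overlapping pages are also
 * moved from visible_avail to visible_reserved.
 */
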
/**
 * i915_ttm_buddy_man_visible_size - Return the size of the CPU visible portion
 * in pages.
 * @man: The buddy allocator ttm manager
 */
u64 i915_ttm_buddy_man_visible_size(struct ttm_resource_manager *man)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	return bman->visible_size;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
void i915_ttm_buddy_man_force_visible_size(struct ttm_resource_manager *man,
					   u64 size)
{
	struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);

	bman->visible_size = size;
}
#endif