/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree were used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/export.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any allocations of its own, so if
 * drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff this is not a real concern. Removing a node again
 * is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as
 * are some basic allocator dumpers for debugging.
 */

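/*
 * A minimal usage sketch, assuming a hypothetical driver object that embeds
 * its node ("foo_bo" and the 4k alignment are illustrative, not part of this
 * file). Because the node is embedded, drm_mm itself allocates nothing.
 */
struct foo_bo {
	struct drm_mm_node vram_node;	/* must start out zeroed, e.g. kzalloc() */
};

static int foo_bo_place(struct drm_mm *vram_mm, struct foo_bo *bo, u64 size)
{
	/* Bottom-up, first-fit search for a 4k-aligned hole of @size bytes. */
	return drm_mm_insert_node_generic(vram_mm, &bo->vram_node,
					  size, 4096, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}
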
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						enum drm_mm_search_flags flags);
static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
						u64 size,
						unsigned alignment,
						unsigned long color,
						u64 start,
						u64 end,
						enum drm_mm_search_flags flags);

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
				 struct drm_mm_node *node,
				 u64 size, unsigned alignment,
				 unsigned long color,
				 enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(node->allocated);

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	BUG_ON(adj_start < hole_start);
	BUG_ON(adj_end > hole_end);

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start + node->size > adj_end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. This is
 * useful to initialize the allocator with preallocated objects which must be
 * set-up before the range allocator can be set-up, e.g. when taking over a
 * firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 end = node->start + node->size;
	u64 hole_start;
	u64 hole_end;

	BUG_ON(node == NULL);

	/* Find the relevant hole to add our node to */
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (hole_start > node->start || hole_end < end)
			continue;

		node->mm = mm;
		node->allocated = 1;

		INIT_LIST_HEAD(&node->hole_stack);
		list_add(&node->node_list, &hole->node_list);

		if (node->start == hole_start) {
			hole->hole_follows = 0;
			list_del_init(&hole->hole_stack);
		}

		node->hole_follows = 0;
		if (end != hole_end) {
			list_add(&node->hole_stack, &mm->hole_stack);
			node->hole_follows = 1;
		}

		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

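/*
 * Illustrative sketch of the reservation use-case described above: carve a
 * firmware-programmed scanout range out of the allocator before anything
 * else can claim it. The fb_start/fb_size values are assumptions made for
 * this example.
 */
static int example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 fb_start, u64 fb_size)
{
	node->start = fb_start;	/* caller-chosen placement, not searched */
	node->size = fb_size;
	node->color = 0;

	/* Fails with -ENOSPC if the range is no longer free. */
	return drm_mm_reserve_node(mm, node);
}
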
/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size, unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_generic(mm, size, alignment,
					       color, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);

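/*
 * Sketch of the sflags/aflags split: DRM_MM_SEARCH_BELOW paired with
 * DRM_MM_CREATE_TOP gives top-down allocation, e.g. to keep the bottom of
 * the range free for more constrained users. Purely illustrative; mm and
 * node are assumed to come from the caller.
 */
static int example_insert_topdown(struct drm_mm *mm, struct drm_mm_node *node,
				  u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0,
					  DRM_MM_SEARCH_BELOW,
					  DRM_MM_CREATE_TOP);
}
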
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
				       struct drm_mm_node *node,
				       u64 size, unsigned alignment,
				       unsigned long color,
				       u64 start, u64 end,
				       enum drm_mm_allocator_flags flags)
{
	struct drm_mm *mm = hole_node->mm;
	u64 hole_start = drm_mm_hole_node_start(hole_node);
	u64 hole_end = drm_mm_hole_node_end(hole_node);
	u64 adj_start = hole_start;
	u64 adj_end = hole_end;

	BUG_ON(!hole_node->hole_follows || node->allocated);

	if (adj_start < start)
		adj_start = start;
	if (adj_end > end)
		adj_end = end;

	if (mm->color_adjust)
		mm->color_adjust(hole_node, color, &adj_start, &adj_end);

	if (flags & DRM_MM_CREATE_TOP)
		adj_start = adj_end - size;

	if (alignment) {
		u64 tmp = adj_start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem) {
			if (flags & DRM_MM_CREATE_TOP)
				adj_start -= rem;
			else
				adj_start += alignment - rem;
		}
	}

	if (adj_start == hole_start) {
		hole_node->hole_follows = 0;
		list_del(&hole_node->hole_stack);
	}

	node->start = adj_start;
	node->size = size;
	node->mm = mm;
	node->color = color;
	node->allocated = 1;

	INIT_LIST_HEAD(&node->hole_stack);
	list_add(&node->node_list, &hole_node->node_list);

	BUG_ON(node->start < start);
	BUG_ON(node->start < adj_start);
	BUG_ON(node->start + node->size > adj_end);
	BUG_ON(node->start + node->size > end);

	node->hole_follows = 0;
	if (__drm_mm_hole_node_start(node) < hole_end) {
		list_add(&node->hole_stack, &mm->hole_stack);
		node->hole_follows = 1;
	}
}

/**
 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
					u64 size, unsigned alignment,
					unsigned long color,
					u64 start, u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags)
{
	struct drm_mm_node *hole_node;

	hole_node = drm_mm_search_free_in_range_generic(mm,
							size, alignment, color,
							start, end, sflags);
	if (!hole_node)
		return -ENOSPC;

	drm_mm_insert_helper_range(hole_node, node,
				   size, alignment, color,
				   start, end, aflags);
	return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);

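/*
 * Sketch of a range-restricted insert: constrain the allocation to a
 * hypothetical CPU-mappable window starting at 0. The mappable_end bound is
 * an assumption made for this example.
 */
static int example_insert_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				   u64 size, u64 mappable_end)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
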
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	if (WARN_ON(!node->allocated))
		return;

	BUG_ON(node->scanned_block || node->scanned_prev_free
				   || node->scanned_next_free);

	prev_node =
	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);

	if (node->hole_follows) {
		BUG_ON(__drm_mm_hole_node_start(node) ==
		       __drm_mm_hole_node_end(node));
		list_del(&node->hole_stack);
	} else
		BUG_ON(__drm_mm_hole_node_start(node) !=
		       __drm_mm_hole_node_end(node));

	if (!prev_node->hole_follows) {
		prev_node->hole_follows = 1;
		list_add(&prev_node->hole_stack, &mm->hole_stack);
	} else
		list_move(&prev_node->hole_stack, &mm->hole_stack);

	list_del(&node->node_list);
	node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
{
	if (end - start < size)
		return 0;

	if (alignment) {
		u64 tmp = start;
		unsigned rem;

		rem = do_div(tmp, alignment);
		if (rem)
			start += alignment - rem;
	}

	return end >= start + size;
}

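/*
 * Worked example of the alignment fixup above: with start = 0x1003 and
 * alignment = 0x1000, do_div() leaves rem = 3, so start is advanced by
 * 0x1000 - 3 = 0xffd to the next aligned address, 0x2000. The hole only
 * fits if it still ends at or after start + size.
 */
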
static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
						      u64 size,
						      unsigned alignment,
						      unsigned long color,
						      enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
							u64 size,
							unsigned alignment,
							unsigned long color,
							u64 start,
							u64 end,
							enum drm_mm_search_flags flags)
{
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	u64 adj_start;
	u64 adj_end;
	u64 best_size;

	BUG_ON(mm->scanned_blocks);

	best = NULL;
	best_size = ~0UL;

	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
			       flags & DRM_MM_SEARCH_BELOW) {
		u64 hole_size = adj_end - adj_start;

		if (adj_start < start)
			adj_start = start;
		if (adj_end > end)
			adj_end = end;

		if (mm->color_adjust) {
			mm->color_adjust(entry, color, &adj_start, &adj_end);
			if (adj_end <= adj_start)
				continue;
		}

		if (!check_free_hole(adj_start, adj_end, size, alignment))
			continue;

		if (!(flags & DRM_MM_SEARCH_BEST))
			return entry;

		if (hole_size < best_size) {
			best = entry;
			best_size = hole_size;
		}
	}

	return best;
}

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of
 * remove and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	list_replace(&old->node_list, &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
	new->hole_follows = old->hole_follows;
	new->mm = old->mm;
	new->start = old->start;
	new->size = old->size;
	new->color = old->color;

	old->allocated = 0;
	new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object.
 * When evicting objects to make space for a new one it is therefore not most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
 * objects to the roster (probably by walking an LRU list, but this can be
 * freely implemented) until a suitable hole is found or there's no further
 * evictable object.
 *
 * Then the driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected in the scan. Adding and
 * removing an object is O(1), and since freeing a node is also O(1) the
 * overall complexity is O(scanned_objects). So like the free stack which
 * needs to be walked before a scan operation even begins this is linear in
 * the number of objects. It doesn't seem to hurt badly. See the eviction
 * loop sketch below.
 */

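/*
 * Illustrative eviction loop following the three steps above. The object
 * type, its LRU list and the container layout ("foo_obj", "lru_link") are
 * assumptions made for this sketch; only the drm_mm_* calls are real.
 */
struct foo_obj {
	struct drm_mm_node node;
	struct list_head lru_link;
};

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
			     u64 size, unsigned alignment)
{
	struct foo_obj *obj, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Step 1: add objects from the LRU until a suitable hole turns up. */
	list_for_each_entry_safe(obj, next, lru, lru_link) {
		/*
		 * list_move() puts the newest entry at the head of scan_list,
		 * so walking scan_list forward below visits the blocks in
		 * exactly the reverse order of addition, as required.
		 */
		list_move(&obj->lru_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* Step 2: every block must be removed again to restore mm state. */
	list_for_each_entry_safe(obj, next, &scan_list, lru_link) {
		if (drm_mm_scan_remove_block(&obj->node))
			list_move(&obj->lru_link, &evict_list);
		else
			list_move(&obj->lru_link, lru);
	}

	if (!found)
		return -ENOSPC;

	/* Step 3: evict the selected objects; each removal is O(1). */
	list_for_each_entry_safe(obj, next, &evict_list, lru_link) {
		drm_mm_remove_node(&obj->node);
		list_del_init(&obj->lru_link);
		/* ... driver-specific unbind/free of the object here ... */
	}

	return 0;
}
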
/**
 * drm_mm_init_scan - initialize lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_check_range = 0;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. Note that there's no need to specify allocation flags, since
 * they only change the place a node is allocated from within a suitable hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end)
{
	mm->scan_color = color;
	mm->scan_alignment = alignment;
	mm->scan_size = size;
	mm->scanned_blocks = 0;
	mm->scan_hit_start = 0;
	mm->scan_hit_end = 0;
	mm->scan_start = start;
	mm->scan_end = end;
	mm->scan_check_range = 1;
	mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	mm->scanned_blocks++;

	BUG_ON(node->scanned_block);
	node->scanned_block = 1;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	node->scanned_preceeds_hole = prev_node->hole_follows;
	prev_node->hole_follows = 1;
	list_del(&node->node_list);
	node->node_list.prev = &prev_node->node_list;
	node->node_list.next = &mm->prev_scanned_node->node_list;
	mm->prev_scanned_node = node;

	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
	adj_end = hole_end = drm_mm_hole_node_end(prev_node);

	if (mm->scan_check_range) {
		if (adj_start < mm->scan_start)
			adj_start = mm->scan_start;
		if (adj_end > mm->scan_end)
			adj_end = mm->scan_end;
	}

	if (mm->color_adjust)
		mm->color_adjust(prev_node, mm->scan_color,
				 &adj_start, &adj_end);

	if (check_free_hole(adj_start, adj_end,
			    mm->scan_size, mm->scan_alignment)) {
		mm->scan_hit_start = hole_start;
		mm->scan_hit_end = hole_end;
		return true;
	}

	return false;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @node: drm_mm_node to remove
 *
 * Nodes _must_ be removed in the exact same order from the scan list as they
 * have been added, otherwise the internal state of the memory manager will be
 * corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
 * return the just freed block (because it's at the top of the free_stack
 * list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	mm->scanned_blocks--;

	BUG_ON(!node->scanned_block);
	node->scanned_block = 0;

	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
			       node_list);

	prev_node->hole_follows = node->scanned_preceeds_hole;
	list_add(&node->node_list, &prev_node->node_list);

	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
		node->start < mm->scan_hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
bool drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->head_node.node_list;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	INIT_LIST_HEAD(&mm->hole_stack);
	mm->scanned_blocks = 0;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	INIT_LIST_HEAD(&mm->head_node.hole_stack);
	mm->head_node.hole_follows = 1;
	mm->head_node.scanned_block = 0;
	mm->head_node.scanned_prev_free = 0;
	mm->head_node.scanned_next_free = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = start - mm->head_node.start;
	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

	mm->color_adjust = NULL;
}
EXPORT_SYMBOL(drm_mm_init);

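/*
 * Illustrative setup/teardown pairing, assuming the embedding structure was
 * zero-allocated (e.g. via kzalloc()) as required above; the "vram" naming
 * is hypothetical.
 */
static void example_vram_setup(struct drm_mm *vram_mm, u64 start, u64 size)
{
	/* Manage the range [start, start + size) with this allocator. */
	drm_mm_init(vram_mm, start, size);
}

static void example_vram_teardown(struct drm_mm *vram_mm)
{
	/* Every node must have been removed before takedown. */
	WARN_ON(!drm_mm_clean(vram_mm));
	drm_mm_takedown(vram_mm);
}
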
/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	WARN(!list_empty(&mm->head_node.node_list),
	     "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
			     const char *prefix)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
			 hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_debug_table - dump allocator state to dmesg
 * @mm: drm_mm allocator to dump
 * @prefix: prefix to use for dumping to dmesg
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_debug_hole(&mm->head_node, prefix);

	drm_mm_for_each_node(entry, mm) {
		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
			 entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_debug_hole(entry, prefix);
	}
	total = total_free + total_used;

	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
		 total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
{
	u64 hole_start, hole_end, hole_size;

	if (entry->hole_follows) {
		hole_start = drm_mm_hole_node_start(entry);
		hole_end = drm_mm_hole_node_end(entry);
		hole_size = hole_end - hole_start;
		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
			   hole_end, hole_size);
		return hole_size;
	}

	return 0;
}

/**
 * drm_mm_dump_table - dump allocator state to a seq_file
 * @m: seq_file to dump to
 * @mm: drm_mm allocator to dump
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(m, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(m, entry);
	}
	total = total_free + total_used;

	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif