/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special purpose needs of gpus.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * datastructures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they need to still allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This is
 * useful for taking over initial mode setting configurations from the firmware,
 * where an object needs to be created which exactly matches the firmware's
 * scanout target. As long as the range is still free it can be inserted anytime
 * after the allocator is initialized, which helps with avoiding looped
 * dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a fairly
 * steep cliff it is not a real concern. Removing a node again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be used
 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 * this to implement guard pages between incompatible caching domains in the
 * graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided as are
 * some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a full
 * memory manager additional data needs to be protected anyway, hence internal
 * locking would be fully redundant.
 */

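/*
 * Illustrative sketch of typical driver usage, not part of the allocator
 * itself: a driver embeds a struct drm_mm in its device structure and a
 * struct drm_mm_node in each buffer object. The names my_dev, my_bo,
 * vram_mm, vram_node and vram_size below are hypothetical driver-side names.
 *
 *	struct my_bo {
 *		struct drm_mm_node vram_node;
 *		u64 size;
 *	};
 *
 *	drm_mm_init(&my_dev->vram_mm, 0, vram_size);
 *
 *	// @node must be zeroed before insertion; bottom-up best-fit search
 *	memset(&bo->vram_node, 0, sizeof(bo->vram_node));
 *	err = drm_mm_insert_node_in_range(&my_dev->vram_mm, &bo->vram_node,
 *					  bo->size, PAGE_SIZE, 0,
 *					  0, vram_size, DRM_MM_INSERT_BEST);
 *	if (err)
 *		return err;
 *
 *	// ... use the range [vram_node.start, vram_node.start + size) ...
 *
 *	drm_mm_remove_node(&bo->vram_node);
 *	drm_mm_takedown(&my_dev->vram_mm);
 *
 * All of the calls above must be serialized by a driver-provided lock, since
 * drm_mm itself does no locking.
 */
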
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (hole_node->allocated) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define RB_INSERT(root, member, expr) do { \
	struct rb_node **link = &root.rb_node, *rb = NULL; \
	u64 x = expr(node); \
	while (*link) { \
		rb = *link; \
		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
			link = &rb->rb_left; \
		else \
			link = &rb->rb_right; \
	} \
	rb_link_node(&node->member, rb, link); \
	rb_insert_color(&node->member, &root); \
} while (0)

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
	node->hole_size = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static inline u64 rb_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole(mm, start);

	case DRM_MM_INSERT_HIGH:
		return find_hole(mm, end);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));

	case DRM_MM_INSERT_HIGH:
		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set-up before the range allocator can be
 * set-up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	u64 end = node->start + node->size;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole(mm, node->start);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->allocated = true;
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);

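/*
 * Illustrative sketch of reserving a firmware-configured scanout buffer at
 * driver load. The names my_dev->vram_mm, fb_node, fb_base and fb_size are
 * hypothetical driver-side names, not part of the drm_mm API.
 *
 *	memset(&fb_node, 0, sizeof(fb_node));
 *	fb_node.start = fb_base;
 *	fb_node.size = fb_size;
 *	ret = drm_mm_reserve_node(&my_dev->vram_mm, &fb_node);
 *	if (ret)
 *		return ret;	// -ENOSPC: the range is already in use
 */
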
static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start >= range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);
		node->allocated = true;

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);

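/*
 * Illustrative sketch of a 64KiB-aligned, top-down allocation restricted to
 * the upper half of the managed range. The names my_dev->vram_mm, node and
 * vram_size are hypothetical driver-side names.
 *
 *	memset(&node, 0, sizeof(node));
 *	ret = drm_mm_insert_node_in_range(&my_dev->vram_mm, &node,
 *					  size, SZ_64K, 0,
 *					  vram_size / 2, vram_size,
 *					  DRM_MM_INSERT_HIGH);
 */
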
/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);
	node->allocated = false;

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!old->allocated);

	*new = *old;

	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	old->allocated = false;
	new->allocated = true;
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have continuous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */

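/*
 * Illustrative sketch of the scan flow described above: evicting from a
 * driver-private LRU until a hole large enough for a new buffer is found.
 * The names my_mm, lru, and struct my_bo with its embedded drm_mm_node
 * "node" and list heads "lru_link"/"scan_link" are hypothetical.
 *
 *	struct drm_mm_scan scan;
 *	struct my_bo *bo, *next;
 *	LIST_HEAD(evict);
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, my_mm, size, align, 0, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(bo, &lru, lru_link) {
 *		list_add(&bo->scan_link, &evict);
 *		if (drm_mm_scan_add_block(&scan, &bo->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// Walking @evict from the head visits the blocks in exactly the
 *	// reverse order they were added, as required to restore the state.
 *	list_for_each_entry_safe(bo, next, &evict, scan_link) {
 *		if (!drm_mm_scan_remove_block(&scan, &bo->node))
 *			list_del(&bo->scan_link);	// keep this object
 *	}
 *
 *	// If @found, evict everything left on @evict, then check
 *	// drm_mm_scan_color_evict() for overlapping neighbours.
 */
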
/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!node->allocated);
	DRM_MM_BUG_ON(node->scanned_block);
	node->scanned_block = true;
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!node->scanned_block);
	node->scanned_block = false;

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.allocated = false;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
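
/*
 * Illustrative sketch of dumping the allocator state to the kernel log. The
 * drm_info_printer() helper from drm_print.h is assumed here; my_dev and
 * vram_mm are hypothetical driver-side names.
 *
 *	struct drm_printer p = drm_info_printer(my_dev->drm.dev);
 *
 *	drm_mm_print(&my_dev->vram_mm, &p);
 */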