/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/limits.h>
#include <linux/mm_types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif
#include <linux/types.h>

#include <drm/drm_print.h>

#ifdef CONFIG_DRM_DEBUG_MM
#define DRM_MM_BUG_ON(expr) BUG_ON(expr)
#else
#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/**
 * enum drm_mm_insert_mode - control search and allocation behaviour
 *
 * The &struct drm_mm range manager supports finding a suitable hole using a
 * number of search trees. These trees are organised by size, by address and
 * in most recent eviction order. This allows the user to find either the
 * smallest hole to reuse, the lowest or highest address to reuse, or simply
 * reuse the most recent eviction that fits. When allocating the &drm_mm_node
 * from within the hole, the &drm_mm_insert_mode also dictates whether to
 * allocate the lowest matching address or the highest.
 */
enum drm_mm_insert_mode {
	/**
	 * @DRM_MM_INSERT_BEST:
	 *
	 * Search for the smallest hole (within the search range) that fits
	 * the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_BEST = 0,

	/**
	 * @DRM_MM_INSERT_LOW:
	 *
	 * Search for the lowest hole (address closest to 0, within the search
	 * range) that fits the desired node.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_LOW,

	/**
	 * @DRM_MM_INSERT_HIGH:
	 *
	 * Search for the highest hole (address closest to U64_MAX, within the
	 * search range) that fits the desired node.
	 *
	 * Allocates the node from the *top* of the found hole. The specified
	 * alignment for the node is applied to the base of the node
	 * (&drm_mm_node.start).
	 */
	DRM_MM_INSERT_HIGH,

	/**
	 * @DRM_MM_INSERT_EVICT:
	 *
	 * Search for the most recently evicted hole (within the search range)
	 * that fits the desired node. This is appropriate for use immediately
	 * after performing an eviction scan (see drm_mm_scan_init()) and
	 * removing the selected nodes to form a hole.
	 *
	 * Allocates the node from the bottom of the found hole.
	 */
	DRM_MM_INSERT_EVICT,

	/**
	 * @DRM_MM_INSERT_ONCE:
	 *
	 * Only check the first hole for suitability and report -ENOSPC
	 * immediately otherwise, rather than check every hole until a
	 * suitable one is found. Can only be used in conjunction with another
	 * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
	 */
	DRM_MM_INSERT_ONCE = BIT(31),

	/**
	 * @DRM_MM_INSERT_HIGHEST:
	 *
	 * Only check the highest hole (the hole with the largest address) and
	 * insert the node at the top of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,

	/**
	 * @DRM_MM_INSERT_LOWEST:
	 *
	 * Only check the lowest hole (the hole with the smallest address) and
	 * insert the node at the bottom of the hole or report -ENOSPC if
	 * unsuitable.
	 *
	 * Does not search all holes.
	 */
	DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
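
/*
 * Example (sketch): placing a small allocation at the top of the managed
 * range by passing DRM_MM_INSERT_HIGH to drm_mm_insert_node_generic(). The
 * names "aperture_mm" and "node" below are placeholders, not part of the
 * drm_mm API.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = drm_mm_insert_node_generic(&aperture_mm, &node,
 *					 4096, 0, 0,
 *					 DRM_MM_INSERT_HIGH);
 *	if (err == -ENOSPC)
 *		return err;
 *
 * DRM_MM_INSERT_HIGHEST would instead try only the single highest hole and
 * fail immediately if it does not fit.
 */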

/**
 * struct drm_mm_node - allocated block in the DRM allocator
 *
 * This represents an allocated block in a &drm_mm allocator. Except for
 * pre-reserved nodes inserted using drm_mm_reserve_node() the structure is
 * entirely opaque and should only be accessed through the provided functions.
 * Since allocation of these nodes is entirely handled by the driver they can
 * be embedded.
 */
struct drm_mm_node {
	/** @color: Opaque driver-private tag. */
	unsigned long color;
	/** @start: Start address of the allocated block. */
	u64 start;
	/** @size: Size of the allocated block. */
	u64 size;
	/* private: */
	struct drm_mm *mm;
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	struct rb_node rb_hole_size;
	struct rb_node rb_hole_addr;
	u64 __subtree_last;
	u64 hole_size;
	/* Largest hole size anywhere in this node's rb_hole_addr subtree. */
	u64 subtree_max_hole;
	unsigned long flags;
#define DRM_MM_NODE_ALLOCATED_BIT	0
#define DRM_MM_NODE_SCANNED_BIT		1
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};
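
/*
 * Example (sketch): pre-reserving a range that is already in use, such as a
 * firmware-configured scanout buffer, by filling in @start and @size before
 * calling drm_mm_reserve_node(). "vram_mm", "fw_fb_base" and "fw_fb_size"
 * are placeholder driver variables.
 *
 *	struct drm_mm_node stolen = {};
 *	int err;
 *
 *	stolen.start = fw_fb_base;
 *	stolen.size = fw_fb_size;
 *	err = drm_mm_reserve_node(&vram_mm, &stolen);
 *	if (err)
 *		return err;
 *
 * An error here means the requested range is not covered by a free hole.
 */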

/**
 * struct drm_mm - DRM allocator
 *
 * DRM range allocator with a few special functions and features geared towards
 * managing GPU memory. Except for the @color_adjust callback the structure is
 * entirely opaque and should only be accessed through the provided functions
 * and macros. This structure can be embedded into larger driver structures.
 */
struct drm_mm {
	/**
	 * @color_adjust:
	 *
	 * Optional driver callback to further apply restrictions on a hole. The
	 * node argument points at the node containing the hole from which the
	 * block would be allocated (see drm_mm_hole_follows() and friends). The
	 * other arguments are the size of the block to be allocated. The driver
	 * can adjust the start and end as needed to e.g. insert guard pages.
	 */
	void (*color_adjust)(const struct drm_mm_node *node,
			     unsigned long color,
			     u64 *start, u64 *end);

	/* private: */
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root_cached interval_tree;
	struct rb_root_cached holes_size;
	struct rb_root holes_addr;

	unsigned long scan_active;
};
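
/*
 * Example (sketch): an optional @color_adjust hook that keeps a one-page
 * guard after any block tagged with a different color. "my_color_adjust" and
 * "my_mm" are placeholder names, not part of the drm_mm API.
 *
 *	static void my_color_adjust(const struct drm_mm_node *node,
 *				    unsigned long color,
 *				    u64 *start, u64 *end)
 *	{
 *		if (node->color != color)
 *			*start += PAGE_SIZE;
 *	}
 *
 *	my_mm.color_adjust = my_color_adjust;
 *
 * The callback is expected to only shrink the [start, end) interval it is
 * handed, never to grow it beyond the original hole.
 */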

/**
 * struct drm_mm_scan - DRM allocator eviction roster data
 *
 * This structure tracks data needed for the eviction roster set up using
 * drm_mm_scan_init(), and used with drm_mm_scan_add_block() and
 * drm_mm_scan_remove_block(). The structure is entirely opaque and should only
 * be accessed through the provided functions and macros. It is meant to be
 * allocated temporarily by the driver on the stack.
 */
struct drm_mm_scan {
	/* private: */
	struct drm_mm *mm;

	u64 size;
	u64 alignment;
	u64 remainder_mask;

	u64 range_start;
	u64 range_end;

	u64 hit_start;
	u64 hit_end;

	unsigned long color;
	enum drm_mm_insert_mode mode;
};

/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers are required to clear a node prior to using it with the
 * drm_mm range manager.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should clear the struct drm_mm prior to initialisation if they
 * want to use this function.
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
	return READ_ONCE(mm->hole_stack.next);
}

/**
 * drm_mm_hole_follows - checks whether a hole follows this node
 * @node: drm_mm_node to check
 *
 * Holes are embedded into the drm_mm using the tail of a drm_mm_node.
 * If you wish to know whether a hole follows this particular node,
 * query this function. See also drm_mm_hole_node_start() and
 * drm_mm_hole_node_end().
 *
 * Returns:
 * True if a hole follows the @node.
 */
static inline bool drm_mm_hole_follows(const struct drm_mm_node *node)
{
	return node->hole_size;
}

static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node));
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at drm_mm_hole_follows().
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_nodes - list of nodes under the drm_mm range manager
 * @mm: the struct drm_mm range manager
 *
 * As the drm_mm range manager hides its node_list deep within its
 * structure, extracting it looks painful and repetitive. This is
 * not expected to be used outside of the drm_mm_for_each_node()
 * macros and similar internal functions.
 *
 * Returns:
 * The node list, may be empty.
 */
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
2014-01-23 03:39:13 +04:00
/**
* drm_mm_for_each_node - iterator to walk over all allocated nodes
2016-12-29 23:48:23 +03:00
* @ entry : & struct drm_mm_node to assign to in each iteration step
* @ mm : & drm_mm allocator to walk
2014-01-23 03:39:13 +04:00
*
* This iterator walks over all nodes in the range allocator . It is implemented
2016-12-29 23:48:23 +03:00
* with list_for_each ( ) , so not save against removal of elements .
2014-01-23 03:39:13 +04:00
*/
2016-12-16 10:46:41 +03:00
# define drm_mm_for_each_node(entry, mm) \
2016-12-22 11:36:05 +03:00
list_for_each_entry ( entry , drm_mm_nodes ( mm ) , node_list )
2016-12-16 10:46:41 +03:00
/**
* drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
2016-12-29 23:48:23 +03:00
* @ entry : & struct drm_mm_node to assign to in each iteration step
* @ next : & struct drm_mm_node to store the next step
* @ mm : & drm_mm allocator to walk
2016-12-16 10:46:41 +03:00
*
* This iterator walks over all nodes in the range allocator . It is implemented
2016-12-29 23:48:23 +03:00
* with list_for_each_safe ( ) , so save against removal of elements .
2016-12-16 10:46:41 +03:00
*/
# define drm_mm_for_each_node_safe(entry, next, mm) \
2016-12-22 11:36:05 +03:00
list_for_each_entry_safe ( entry , next , drm_mm_nodes ( mm ) , node_list )

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @pos: &drm_mm_node used internally to track progress
 * @mm: &drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so not safe against removal of elements. @pos is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 */
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
	for (pos = list_first_entry(&(mm)->hole_stack, \
				    typeof(*pos), hole_stack); \
	     &pos->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(pos), \
	     hole_end = hole_start + pos->hole_size, \
	     1 : 0; \
	     pos = list_next_entry(pos, hole_stack))
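
/*
 * Example (sketch): dumping every hole from a driver-specific debug helper.
 * "mm" is assumed to be a pointer to the driver's &struct drm_mm.
 *
 *	struct drm_mm_node *pos;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_hole(pos, mm, hole_start, hole_end)
 *		pr_info("hole [%llx, %llx): %llu bytes\n",
 *			hole_start, hole_end, hole_end - hole_start);
 */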

/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
int drm_mm_insert_node_in_range(struct drm_mm *mm,
				struct drm_mm_node *node,
				u64 size,
				u64 alignment,
				unsigned long color,
				u64 start,
				u64 end,
				enum drm_mm_insert_mode mode);

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_insert_node_in_range() with no
 * range restrictions applied.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
			   u64 size, u64 alignment,
			   unsigned long color,
			   enum drm_mm_insert_mode mode)
{
	return drm_mm_insert_node_in_range(mm, node,
					   size, alignment, color,
					   0, U64_MAX, mode);
}

/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size)
{
	return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
void drm_mm_takedown(struct drm_mm *mm);

/**
 * drm_mm_clean - checks whether an allocator is clean
 * @mm: drm_mm allocator to check
 *
 * Returns:
 * True if the allocator is completely free, false if there's still a node
 * allocated in it.
 */
static inline bool drm_mm_clean(const struct drm_mm *mm)
{
	return list_empty(drm_mm_nodes(mm));
}
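
/*
 * Example (sketch): the basic life cycle of a drm_mm allocator. The names
 * "vram_mm", "vram_size" and "node" are placeholder driver variables.
 *
 *	struct drm_mm vram_mm;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&vram_mm, 0, vram_size);
 *
 *	err = drm_mm_insert_node(&vram_mm, &node, 4096);
 *	if (!err) {
 *		... use node.start as the offset of the allocation ...
 *		drm_mm_remove_node(&node);
 *	}
 *
 *	drm_mm_takedown(&vram_mm);
 *
 * drm_mm_insert_node() returns -ENOSPC when no hole of at least the requested
 * size is available, and drm_mm_takedown() expects every node to have been
 * removed first.
 */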

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
 * over the special _unallocated_ &drm_mm.head_node, and may even continue
 * indefinitely.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__->start < (end__);					\
	     node__ = list_next_entry(node__, node_list))
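
/*
 * Example (sketch): checking whether any allocated node overlaps a given
 * [start, end) range. "mm" is assumed to be a pointer to the driver's
 * &struct drm_mm, with start and end inside the managed range.
 *
 *	struct drm_mm_node *node;
 *	bool busy = false;
 *
 *	drm_mm_for_each_node_in_range(node, mm, start, end) {
 *		busy = true;
 *		break;
 *	}
 */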

void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size, u64 alignment, unsigned long color,
				 u64 start, u64 end,
				 enum drm_mm_insert_mode mode);

/**
 * drm_mm_scan_init - initialize lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This is a simplified version of drm_mm_scan_init_with_range() with no range
 * restrictions applied.
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
				    struct drm_mm *mm,
				    u64 size,
				    u64 alignment,
				    unsigned long color,
				    enum drm_mm_insert_mode mode)
{
	drm_mm_scan_init_with_range(scan, mm,
				    size, alignment, color,
				    0, U64_MAX, mode);
}

bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node);
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan);
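
/*
 * Example (sketch) of the eviction scan flow these helpers support; see the
 * kernel-doc in drm_mm.c for the exact ordering rules. "obj", "lru" and the
 * link members are placeholder driver-side bookkeeping.
 *
 *	struct drm_mm_scan scan;
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, mm, size, alignment, 0, DRM_MM_INSERT_LOW);
 *
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->evict_link, &evict_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 * Every node added to the scan must be removed again; nodes for which
 * drm_mm_scan_remove_block() returns true form the hole and must actually be
 * evicted, after which the allocation can be retried with
 * DRM_MM_INSERT_EVICT. drm_mm_scan_color_evict() reports any additional node
 * that must go to satisfy @color_adjust.
 */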

void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
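
/*
 * Example (sketch): dumping the allocator state through a &struct drm_printer,
 * e.g. from a debugfs show callback. "dev" is assumed to be the driver's
 * &struct drm_device and "vram_mm" its allocator.
 *
 *	struct drm_printer p = drm_info_printer(dev->dev);
 *
 *	drm_mm_print(&vram_mm, &p);
 */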

#endif