2009-04-08 19:11:16 +04:00
/**************************************************************************
*
* Copyright 2006 - 2008 Tungsten Graphics , Inc . , Cedar Park , TX . USA .
* All Rights Reserved .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the
* " Software " ) , to deal in the Software without restriction , including
* without limitation the rights to use , copy , modify , merge , publish ,
* distribute , sub license , and / or sell copies of the Software , and to
* permit persons to whom the Software is furnished to do so , subject to
* the following conditions :
*
* The above copyright notice and this permission notice ( including the
* next paragraph ) shall be included in all copies or substantial portions
* of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NON - INFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS , AUTHORS AND / OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM ,
* DAMAGES OR OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR
* OTHERWISE , ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE .
*
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Authors :
* Thomas Hellstrom < thomas - at - tungstengraphics - dot - com >
*/
# ifndef _DRM_MM_H_
# define _DRM_MM_H_
/*
* Generic range manager structs
*/
2013-07-25 20:02:31 +04:00
# include <linux/bug.h>
# include <linux/kernel.h>
2009-04-08 19:11:16 +04:00
# include <linux/list.h>
2013-07-25 20:02:31 +04:00
# include <linux/spinlock.h>
2009-09-08 05:32:08 +04:00
# ifdef CONFIG_DEBUG_FS
# include <linux/seq_file.h>
# endif
2009-04-08 19:11:16 +04:00
/* How the allocator searches for a suitable hole (see drm_mm_insert_node*()). */
enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =		0,	/* first hole that fits */
	DRM_MM_SEARCH_BEST =		1 << 0,	/* smallest hole that fits */
	DRM_MM_SEARCH_BELOW =		1 << 1,	/* walk the hole list backwards */
};
/* Where inside the chosen hole the node is placed. */
enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =		0,	/* allocate from the bottom of the hole */
	DRM_MM_CREATE_TOP =		1 << 0,	/* allocate from the top of the hole */
};

/* Convenience pairs expanding to (search flag, allocator flag) arguments. */
#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
/* One allocated (or reserved) range inside a drm_mm allocator. */
struct drm_mm_node {
	struct list_head node_list;	/* link in mm->head_node.node_list, address-ordered */
	struct list_head hole_stack;	/* link in mm->hole_stack while hole_follows is set */
	unsigned hole_follows : 1;	/* a free hole starts right after this node */
	/* Bookkeeping for the eviction scan (drm_mm_scan_add_block() etc.). */
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;		/* node currently owns a range in @mm */
	unsigned long color;		/* opaque driver value fed to mm->color_adjust */
	u64 start;			/* start address of this node's range */
	u64 size;			/* size of this node's range */
	struct drm_mm *mm;		/* allocator this node belongs to */
};
/* A range allocator instance. Embeds a sentinel head_node; see drm_mm_init(). */
struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;

	/* State for the eviction scan, set up by drm_mm_init_scan() /
	 * drm_mm_init_scan_with_range(). */
	unsigned int scan_check_range : 1;	/* restrict the scan to [scan_start, scan_end) */
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;		/* number of blocks added to the current scan */
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	/* Optional driver hook: may shrink the [start, end) range of a hole
	 * being considered, based on the node's color. */
	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
			     u64 *start, u64 *end);
};
2014-01-23 03:39:13 +04:00
/**
* drm_mm_node_allocated - checks whether a node is allocated
* @ node : drm_mm_node to check
*
* Drivers should use this helpers for proper encapusulation of drm_mm
* internals .
*
* Returns :
* True if the @ node is allocated .
*/
2011-02-18 19:59:14 +03:00
static inline bool drm_mm_node_allocated ( struct drm_mm_node * node )
{
return node - > allocated ;
}
2014-01-23 03:39:13 +04:00
/**
* drm_mm_initialized - checks whether an allocator is initialized
* @ mm : drm_mm to check
*
* Drivers should use this helpers for proper encapusulation of drm_mm
* internals .
*
* Returns :
* True if the @ mm is initialized .
*/
2011-02-18 19:59:11 +03:00
static inline bool drm_mm_initialized ( struct drm_mm * mm )
{
2011-02-18 19:59:12 +03:00
return mm - > hole_stack . next ;
2011-02-18 19:59:11 +03:00
}
2012-11-15 15:32:17 +04:00
2015-01-23 11:05:06 +03:00
static inline u64 __drm_mm_hole_node_start ( struct drm_mm_node * hole_node )
2012-11-15 15:32:17 +04:00
{
return hole_node - > start + hole_node - > size ;
}
/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}
2015-01-23 11:05:06 +03:00
static inline u64 __drm_mm_hole_node_end ( struct drm_mm_node * hole_node )
2012-11-15 15:32:17 +04:00
{
return list_entry ( hole_node - > node_list . next ,
struct drm_mm_node , node_list ) - > start ;
}
/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}
/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each, so not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)
/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: ulong variable to assign the hole start to on each iteration
 * @hole_end: ulong variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each, so not safe against removal of elements. @entry is used
 * internally and will not reflect a real drm_mm_node for the very first hole.
 * Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
/*
 * Basic range manager support (drm_mm.c)
 */
/* Reserve a node whose start/size are already filled in by the caller. */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
/* Search for a hole matching the constraints and insert @node there. */
int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}
/* As drm_mm_insert_node_generic(), but only considers holes inside
 * the [start, end) range. */
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocate node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}
/* Remove a node and give its range back to the allocator. */
void drm_mm_remove_node(struct drm_mm_node *node);
/* Atomically replace @old with @new at the same range. */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
/* Initialize the allocator to manage the range [start, start + size). */
void drm_mm_init(struct drm_mm *mm,
		 u64 start,
		 u64 size);
void drm_mm_takedown(struct drm_mm *mm);
/* Returns true if no nodes are allocated. */
bool drm_mm_clean(struct drm_mm *mm);
/*
 * Eviction scan support: set up a scan with the desired allocation
 * constraints, then feed candidate nodes in with drm_mm_scan_add_block()
 * and remove them again with drm_mm_scan_remove_block().
 */
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
/* As drm_mm_init_scan(), but restricted to the [start, end) range. */
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
/* Dump the allocator state to the kernel log, prefixed with @prefix. */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
/* Dump the allocator state into a debugfs seq_file. */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif
2009-04-08 19:11:16 +04:00
# endif