/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
        struct drm_mm_node *child;

        if (atomic)
                child = kzalloc(sizeof(*child), GFP_ATOMIC);
        else
                child = kzalloc(sizeof(*child), GFP_KERNEL);

        if (unlikely(child == NULL)) {
                spin_lock(&mm->unused_lock);
                if (list_empty(&mm->unused_nodes))
                        child = NULL;
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
                                       struct drm_mm_node, node_list);
                        list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
        }
        return child;
}

/* drm_mm_pre_get() - pre-allocate drm_mm_node structures
 * drm_mm: memory manager struct we are pre-allocating for
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
        struct drm_mm_node *node;

        spin_lock(&mm->unused_lock);
        while (mm->num_unused < MM_UNUSED_TARGET) {
                spin_unlock(&mm->unused_lock);
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                spin_lock(&mm->unused_lock);

                if (unlikely(node == NULL)) {
                        int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
                        spin_unlock(&mm->unused_lock);
                        return ret;
                }
                ++mm->num_unused;
                list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
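
/*
 * A minimal usage sketch (hypothetical caller; dev_priv and mm_lock are
 * made-up names): top up the node cache before taking a spinlock, so that
 * an allocation done under the lock can use drm_mm_get_block_atomic() and
 * draw from mm->unused_nodes instead of depending on GFP_ATOMIC:
 *
 *      if (drm_mm_pre_get(&dev_priv->mm))
 *              return -ENOMEM;
 *      spin_lock(&dev_priv->mm_lock);
 *      node = drm_mm_get_block_atomic(hole_node, size, alignment);
 *      spin_unlock(&dev_priv->mm_lock);
 */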

static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
        return hole_node->start + hole_node->size;
}

static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
        struct drm_mm_node *next_node =
                list_entry(hole_node->node_list.next, struct drm_mm_node,
                           node_list);

        return next_node->start;
}
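
/*
 * Added commentary: holes are not stored as separate objects. A node with
 * hole_follows set is immediately followed by free space, stretching from
 * the end of that node (drm_mm_hole_node_start()) to the start of the next
 * node on node_list (drm_mm_hole_node_end()). All such nodes are also
 * linked on mm->hole_stack, which is what the search functions below walk.
 */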

static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
                                 struct drm_mm_node *node,
                                 unsigned long size, unsigned alignment)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (alignment)
                tmp = hole_start % alignment;

        if (!tmp) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        } else
                wasted = alignment - tmp;

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}

struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper(hole_node, node, size, alignment);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. The preallocated memory node
 * must be cleared.
 */
int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
                       unsigned long size, unsigned alignment)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free(mm, size, alignment, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper(hole_node, node, size, alignment);

        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node);
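
/*
 * A minimal usage sketch with an embedded node (struct my_buffer and
 * my_buffer_bind are hypothetical):
 *
 *      struct my_buffer {
 *              struct drm_mm_node node;        (must be zeroed before use)
 *      };
 *
 *      static int my_buffer_bind(struct drm_mm *mm, struct my_buffer *buf,
 *                                unsigned long size)
 *      {
 *              return drm_mm_insert_node(mm, &buf->node, size, 0);
 *      }
 *
 * On success the allocated range is buf->node.start / buf->node.size, and
 * it is released again with drm_mm_remove_node(&buf->node).
 */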

static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
                                       struct drm_mm_node *node,
                                       unsigned long size, unsigned alignment,
                                       unsigned long start, unsigned long end)
{
        struct drm_mm *mm = hole_node->mm;
        unsigned long tmp = 0, wasted = 0;
        unsigned long hole_start = drm_mm_hole_node_start(hole_node);
        unsigned long hole_end = drm_mm_hole_node_end(hole_node);

        BUG_ON(!hole_node->hole_follows || node->allocated);

        if (hole_start < start)
                wasted += start - hole_start;
        if (alignment)
                tmp = (hole_start + wasted) % alignment;

        if (tmp)
                wasted += alignment - tmp;

        if (!wasted) {
                hole_node->hole_follows = 0;
                list_del_init(&hole_node->hole_stack);
        }

        node->start = hole_start + wasted;
        node->size = size;
        node->mm = mm;
        node->allocated = 1;

        INIT_LIST_HEAD(&node->hole_stack);
        list_add(&node->node_list, &hole_node->node_list);

        BUG_ON(node->start + node->size > hole_end);
        BUG_ON(node->start + node->size > end);

        if (node->start + node->size < hole_end) {
                list_add(&node->hole_stack, &mm->hole_stack);
                node->hole_follows = 1;
        } else {
                node->hole_follows = 0;
        }
}

struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                   unsigned long size,
                                                   unsigned alignment,
                                                   unsigned long start,
                                                   unsigned long end,
                                                   int atomic)
{
        struct drm_mm_node *node;

        node = drm_mm_kmalloc(hole_node->mm, atomic);
        if (unlikely(node == NULL))
                return NULL;

        drm_mm_insert_helper_range(hole_node, node, size, alignment,
                                   start, end);

        return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

/**
 * Search for free space and insert a preallocated memory node. Returns
 * -ENOSPC if no suitable free area is available. This is for range-restricted
 * allocations. The preallocated memory node must be cleared.
 */
int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
                                unsigned long size, unsigned alignment,
                                unsigned long start, unsigned long end)
{
        struct drm_mm_node *hole_node;

        hole_node = drm_mm_search_free_in_range(mm, size, alignment,
                                                start, end, 0);
        if (!hole_node)
                return -ENOSPC;

        drm_mm_insert_helper_range(hole_node, node, size, alignment,
                                   start, end);

        return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
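
/*
 * Usage sketch for a range-restricted allocation (numbers are hypothetical):
 * place a 4 KiB block, 4 KiB aligned, somewhere in the first 16 MiB of the
 * managed area, e.g. for objects that must stay below a hardware limit:
 *
 *      ret = drm_mm_insert_node_in_range(mm, &buf->node, 4096, 4096,
 *                                        0, 16 * 1024 * 1024);
 *      if (ret)
 *              return ret;     (-ENOSPC: no fitting hole below the limit)
 */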

/**
 * Remove a memory node from the allocator.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        BUG_ON(node->scanned_block || node->scanned_prev_free
                                   || node->scanned_next_free);

        prev_node =
            list_entry(node->node_list.prev, struct drm_mm_node, node_list);

        if (node->hole_follows) {
                BUG_ON(drm_mm_hole_node_start(node)
                                == drm_mm_hole_node_end(node));
                list_del(&node->hole_stack);
        } else
                BUG_ON(drm_mm_hole_node_start(node)
                                != drm_mm_hole_node_end(node));

        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        } else
                list_move(&prev_node->hole_stack, &mm->hole_stack);

        list_del(&node->node_list);
        node->allocated = 0;
}
EXPORT_SYMBOL(drm_mm_remove_node);

/*
 * Remove a memory node from the allocator and free the allocated struct
 * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
 * drm_mm_get_block functions.
 */
void drm_mm_put_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;

        drm_mm_remove_node(node);

        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->node_list, &mm->unused_nodes);
                ++mm->num_unused;
        } else
                kfree(node);
        spin_unlock(&mm->unused_lock);
}
EXPORT_SYMBOL(drm_mm_put_block);

static int check_free_hole(unsigned long start, unsigned long end,
                           unsigned long size, unsigned alignment)
{
        unsigned wasted = 0;

        if (end - start < size)
                return 0;

        if (alignment) {
                unsigned tmp = start % alignment;
                if (tmp)
                        wasted = alignment - tmp;
        }

        if (end >= start + size + wasted) {
                return 1;
        }

        return 0;
}

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
                                       unsigned long size,
                                       unsigned alignment, int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(drm_mm_hole_node_start(entry),
                                     drm_mm_hole_node_end(entry),
                                     size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free);
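
/*
 * Sketch of the classic two-step allocation with kmalloc'ed nodes
 * (caller-side; drm_mm_get_block() is the non-atomic wrapper from drm_mm.h
 * around drm_mm_get_block_generic()):
 *
 *      struct drm_mm_node *hole, *node;
 *
 *      hole = drm_mm_search_free(mm, size, alignment, 0);
 *      if (!hole)
 *              return -ENOSPC;
 *      node = drm_mm_get_block(hole, size, alignment);
 *      if (!node)
 *              return -ENOMEM;
 *      ...
 *      drm_mm_put_block(node);
 *
 * Passing best_match = 1 instead scans all holes and picks the smallest
 * fitting one, trading search time for less fragmentation.
 */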

struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int best_match)
{
        struct drm_mm_node *entry;
        struct drm_mm_node *best;
        unsigned long best_size;

        BUG_ON(mm->scanned_blocks);

        best = NULL;
        best_size = ~0UL;

        list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
                unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
                        start : drm_mm_hole_node_start(entry);
                unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
                        end : drm_mm_hole_node_end(entry);

                BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;

                if (!best_match)
                        return entry;

                if (entry->size < best_size) {
                        best = entry;
                        best_size = entry->size;
                }
        }

        return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

/**
 * Moves an allocation. To be used with embedded struct drm_mm_node.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
        list_replace(&old->node_list, &new->node_list);
        list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
        new->size = old->size;

        old->allocated = 0;
        new->allocated = 1;
}
EXPORT_SYMBOL(drm_mm_replace_node);
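
/*
 * Usage sketch (hypothetical objects): hand the allocated range over from
 * one embedded node to another without a remove/insert cycle, e.g. when
 * swapping the object that backs a range:
 *
 *      drm_mm_replace_node(&old_buf->node, &new_buf->node);
 *
 * Afterwards old_buf->node is marked unallocated and can be reused, while
 * new_buf->node owns exactly the same start/size range.
 */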

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
                      unsigned alignment)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_check_range = 0;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan);

/**
 * Initialize lru scanning.
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole. This version is for range-restricted scans.
 *
 * Warning: As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
                                 unsigned alignment,
                                 unsigned long start,
                                 unsigned long end)
{
        mm->scan_alignment = alignment;
        mm->scan_size = size;
        mm->scanned_blocks = 0;
        mm->scan_hit_start = 0;
        mm->scan_hit_size = 0;
        mm->scan_start = start;
        mm->scan_end = end;
        mm->scan_check_range = 1;
        mm->prev_scanned_node = NULL;
}
EXPORT_SYMBOL(drm_mm_init_scan_with_range);

/**
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns non-zero if a hole has been found, zero otherwise.
 */
int drm_mm_scan_add_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;
        unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;

        mm->scanned_blocks++;

        BUG_ON(node->scanned_block);
        node->scanned_block = 1;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        node->scanned_preceeds_hole = prev_node->hole_follows;
        prev_node->hole_follows = 1;
        list_del(&node->node_list);
        node->node_list.prev = &prev_node->node_list;
        node->node_list.next = &mm->prev_scanned_node->node_list;
        mm->prev_scanned_node = node;

        hole_start = drm_mm_hole_node_start(prev_node);
        hole_end = drm_mm_hole_node_end(prev_node);

        if (mm->scan_check_range) {
                adj_start = hole_start < mm->scan_start ?
                        mm->scan_start : hole_start;
                adj_end = hole_end > mm->scan_end ?
                        mm->scan_end : hole_end;
        } else {
                adj_start = hole_start;
                adj_end = hole_end;
        }

        if (check_free_hole(adj_start, adj_end,
                            mm->scan_size, mm->scan_alignment)) {
                mm->scan_hit_start = hole_start;
                /* Store the size of the complete resulting hole, so that
                 * the containment check in drm_mm_scan_remove_block()
                 * works. */
                mm->scan_hit_size = hole_end - hole_start;

                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * Remove a node from the scan list.
 *
 * Nodes _must_ be removed in exactly the reverse order from the scan list as
 * they have been added, otherwise the internal state of the memory manager
 * will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_search_free with best_match = 0 will then
 * return the just freed block (because it's at the top of the hole_stack
 * list).
 *
 * Returns one if this block should be evicted, zero otherwise. Will always
 * return zero when no hole has been found.
 */
int drm_mm_scan_remove_block(struct drm_mm_node *node)
{
        struct drm_mm *mm = node->mm;
        struct drm_mm_node *prev_node;

        mm->scanned_blocks--;

        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;

        prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
                               node_list);

        prev_node->hole_follows = node->scanned_preceeds_hole;
        INIT_LIST_HEAD(&node->node_list);
        list_add(&node->node_list, &prev_node->node_list);

        /* Only need to check for containment because start & size for the
         * complete resulting free block (not just the desired part) is
         * stored. */
        if (node->start >= mm->scan_hit_start &&
            node->start + node->size
                    <= mm->scan_hit_start + mm->scan_hit_size) {
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
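
/*
 * Sketch of an eviction loop built on the scan API (modelled on how drivers
 * such as i915 use it; obj, lru and the list members are hypothetical).
 * Blocks are added while walking an LRU list until a hole is found, then
 * unwound; only the blocks for which drm_mm_scan_remove_block() returns 1
 * need to be evicted:
 *
 *      LIST_HEAD(scan_list);
 *
 *      drm_mm_init_scan(mm, size, alignment);
 *      list_for_each_entry(obj, &lru, lru_link) {
 *              list_add(&obj->scan_link, &scan_list);
 *              if (drm_mm_scan_add_block(&obj->node))
 *                      goto found;
 *      }
 *
 *      (no hole found: restore every block and give up)
 *      list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *              drm_mm_scan_remove_block(&obj->node);
 *              list_del(&obj->scan_link);
 *      }
 *      return -ENOSPC;
 *
 * found:
 *      list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *              if (drm_mm_scan_remove_block(&obj->node))
 *                      evict(obj);     (frees obj->node)
 *              list_del(&obj->scan_link);
 *      }
 *
 * Because scan_list is built with list_add() (prepending), walking it
 * front-to-back removes blocks in the reverse order of addition, as
 * required above.
 */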

int drm_mm_clean(struct drm_mm *mm)
{
        struct list_head *head = &mm->head_node.node_list;

        return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
        INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);

        /* Clever trick to avoid a special case in the free hole tracking. */
        INIT_LIST_HEAD(&mm->head_node.node_list);
        INIT_LIST_HEAD(&mm->head_node.hole_stack);
        mm->head_node.hole_follows = 1;
        mm->head_node.scanned_block = 0;
        mm->head_node.scanned_prev_free = 0;
        mm->head_node.scanned_next_free = 0;
        mm->head_node.mm = mm;
        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;
        list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);

        return 0;
}
EXPORT_SYMBOL(drm_mm_init);
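
/*
 * Note on the head_node setup above (added commentary): with
 * start = start + size and size = start - (start + size), the unsigned
 * arithmetic in drm_mm_hole_node_start(&mm->head_node) wraps around to
 * "start", so the entire managed range initially appears as one hole
 * following head_node, with no special case in the hole tracking.
 *
 * Minimal init/teardown sketch (dev_priv and vram_size are hypothetical):
 *
 *      drm_mm_init(&dev_priv->vram_mm, 0, vram_size);
 *      ... allocate and free nodes ...
 *      drm_mm_takedown(&dev_priv->vram_mm);
 */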

void drm_mm_takedown(struct drm_mm *mm)
{
        struct drm_mm_node *entry, *next;

        if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }

        spin_lock(&mm->unused_lock);
        list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
                list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
        spin_unlock(&mm->unused_lock);

        BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);

void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                        prefix, hole_start, hole_end,
                        hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
                        entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
                                prefix, hole_start, hole_end,
                                hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

#if defined(CONFIG_DEBUG_FS)
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
        struct drm_mm_node *entry;
        unsigned long total_used = 0, total_free = 0, total = 0;
        unsigned long hole_start, hole_end, hole_size;

        hole_start = drm_mm_hole_node_start(&mm->head_node);
        hole_end = drm_mm_hole_node_end(&mm->head_node);
        hole_size = hole_end - hole_start;
        if (hole_size)
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                hole_start, hole_end, hole_size);
        total_free += hole_size;

        drm_mm_for_each_node(entry, mm) {
                seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
                                entry->start, entry->start + entry->size,
                                entry->size);
                total_used += entry->size;

                if (entry->hole_follows) {
                        hole_start = drm_mm_hole_node_start(entry);
                        hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                        hole_start, hole_end, hole_size);
                        total_free += hole_size;
                }
        }
        total = total_free + total_used;

        seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used,
                        total_free);
        return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif