/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved if
 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
# include "drmP.h"
2009-04-08 19:11:16 +04:00
# include "drm_mm.h"
2007-01-08 14:25:47 +03:00
# include <linux/slab.h>
2009-04-08 19:11:16 +04:00
# define MM_UNUSED_TARGET 4
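
/*
 * A minimal usage sketch of the lifecycle below. The sizes and variable
 * names ("hole", "node") are illustrative assumptions, not part of this
 * file's API:
 *
 *	struct drm_mm mm;
 *	struct drm_mm_node *hole, *node = NULL;
 *
 *	drm_mm_init(&mm, 0, 4096);
 *	hole = drm_mm_search_free(&mm, 1024, 0, 0);
 *	if (hole)
 *		node = drm_mm_get_block(hole, 1024, 0);
 *	...
 *	drm_mm_put_block(node);
 *	drm_mm_takedown(&mm);
 */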

unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return 0;

	return entry->size;
}

int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *child;

	if (atomic)
		child = kmalloc(sizeof(*child), GFP_ATOMIC);
	else
		child = kmalloc(sizeof(*child), GFP_KERNEL);

	if (unlikely(child == NULL)) {
		spin_lock(&mm->unused_lock);
		if (list_empty(&mm->unused_nodes))
			child = NULL;
		else {
			child =
			    list_entry(mm->unused_nodes.next,
				       struct drm_mm_node, fl_entry);
			list_del(&child->fl_entry);
			--mm->num_unused;
		}
		spin_unlock(&mm->unused_lock);
	}
	return child;
}

int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
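
/*
 * A hedged usage sketch for the atomic path: drm_mm_pre_get() may sleep,
 * so call it before taking your lock; the cached unused_nodes then let
 * drm_mm_get_block_atomic() succeed without sleeping. "driver_lock" and
 * "hole" below are illustrative assumptions:
 *
 *	drm_mm_pre_get(mm);
 *	spin_lock(&driver_lock);
 *	node = drm_mm_get_block_atomic(hole, size, alignment);
 *	spin_unlock(&driver_lock);
 */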

static int drm_mm_create_tail_node(struct drm_mm *mm,
				   unsigned long start,
				   unsigned long size, int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
{
	struct list_head *tail_node;
	struct drm_mm_node *entry;

	tail_node = mm->ml_entry.prev;
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
	if (!entry->free) {
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
	}
	entry->size += size;
	return 0;
}

static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
						 unsigned long size,
						 int atomic)
{
	struct drm_mm_node *child;

	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}

struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *node,
				     unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;

	if (alignment)
		tmp = node->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(node, alignment - tmp, 0);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, 0);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block);

struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
					    unsigned long size,
					    unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff =
		    drm_mm_split_at_start(parent, alignment - tmp, 1);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		child = parent;
	} else {
		child = drm_mm_split_at_start(parent, size, 1);
	}

	/* Fall through so the alignment split-off is always returned to the
	 * free pool; an early return on the exact-fit path would leak it. */
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
EXPORT_SYMBOL(drm_mm_get_block_atomic);

/*
 * Put a block. Merge with the previous and/or next block if they are free.
 * Otherwise add to the free stack.
 */
void drm_mm_put_block(struct drm_mm_node *cur)
{
	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
			} else {
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		list_del(&cur->ml_entry);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
	}
}
EXPORT_SYMBOL(drm_mm_put_block);
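
/*
 * A minimal sketch of the coalescing above, under the assumption of a
 * fresh 4096-byte manager (variable names and sizes are illustrative):
 *
 *	drm_mm_init(&mm, 0, 4096);
 *	a = drm_mm_get_block(drm_mm_search_free(&mm, 1024, 0, 0), 1024, 0);
 *	b = drm_mm_get_block(drm_mm_search_free(&mm, 1024, 0, 0), 1024, 0);
 *	drm_mm_put_block(a);	a [0,1024) free, b [1024,2048) used,
 *				tail [2048,4096) free
 *	drm_mm_put_block(b);	b merges with both neighbours into a single
 *				free region covering [0,4096); the spare
 *				nodes go to unused_nodes or are kfree()d
 */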

struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	unsigned wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free);

int drm_mm_clean(struct drm_mm *mm)
{
	struct list_head *head = &mm->ml_entry;

	return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_clean);

int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
2006-08-07 15:30:28 +04:00
2007-07-11 10:53:40 +04:00
void drm_mm_takedown ( struct drm_mm * mm )
2006-08-07 15:30:28 +04:00
{
2007-01-08 14:25:47 +03:00
struct list_head * bnode = mm - > fl_entry . next ;
2007-07-11 10:53:40 +04:00
struct drm_mm_node * entry ;
2009-04-08 19:11:16 +04:00
struct drm_mm_node * next ;
2006-08-07 15:30:28 +04:00
2007-07-11 10:53:40 +04:00
entry = list_entry ( bnode , struct drm_mm_node , fl_entry ) ;
2006-08-07 15:30:28 +04:00
2007-01-08 14:25:47 +03:00
if ( entry - > ml_entry . next ! = & mm - > ml_entry | |
entry - > fl_entry . next ! = & mm - > fl_entry ) {
2006-08-07 15:30:28 +04:00
DRM_ERROR ( " Memory manager not clean. Delaying takedown \n " ) ;
return ;
}
list_del ( & entry - > fl_entry ) ;
list_del ( & entry - > ml_entry ) ;
2009-04-08 19:11:16 +04:00
kfree ( entry ) ;
spin_lock ( & mm - > unused_lock ) ;
list_for_each_entry_safe ( entry , next , & mm - > unused_nodes , fl_entry ) {
list_del ( & entry - > fl_entry ) ;
kfree ( entry ) ;
- - mm - > num_unused ;
}
spin_unlock ( & mm - > unused_lock ) ;
2006-08-07 15:30:28 +04:00
2009-04-08 19:11:16 +04:00
BUG_ON ( mm - > num_unused ! = 0 ) ;
2006-08-07 15:30:28 +04:00
}
2008-11-08 01:05:41 +03:00
EXPORT_SYMBOL ( drm_mm_takedown ) ;