/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
/**
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */

struct ttm_range_manager {
	struct drm_mm mm;
	spinlock_t lock;
};
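
/*
 * Note on the get_node contract below: running out of managed space is
 * not treated as an error. ttm_bo_man_get_node() returns 0 with
 * mem->mm_node left NULL, and the caller is expected to test
 * mem->mm_node to tell "no space, try eviction" apart from a hard
 * failure (negative return).
 */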
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node = NULL;
	unsigned long lpfn;
	int ret;

	/* An lpfn of zero means "no upper bound": clamp to the managed size. */
	lpfn = placement->lpfn;
	if (!lpfn)
		lpfn = man->size;

	/*
	 * Preallocate node storage outside the lock, then search for a
	 * hole and claim it atomically under the lock; retry if another
	 * thread raced us to the hole we found.
	 */
	do {
		ret = drm_mm_pre_get(mm);
		if (unlikely(ret))
			return ret;

		spin_lock(&rman->lock);
		node = drm_mm_search_free_in_range(mm,
					mem->num_pages, mem->page_alignment,
					placement->fpfn, lpfn,
					DRM_MM_SEARCH_BEST);
		if (unlikely(node == NULL)) {
			spin_unlock(&rman->lock);
			/* Out of space is not an error: mm_node stays NULL. */
			return 0;
		}
		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
						     mem->page_alignment,
						     placement->fpfn,
						     lpfn);
		spin_unlock(&rman->lock);
	} while (node == NULL);

	mem->mm_node = node;
	mem->start = node->start;
	return 0;
}
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}
static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
			   unsigned long p_size)
{
	struct ttm_range_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	drm_mm_init(&rman->mm, 0, p_size);
	spin_lock_init(&rman->lock);
	man->priv = rman;
	return 0;
}
static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
	struct drm_mm *mm = &rman->mm;

	spin_lock(&rman->lock);
	/* Refuse teardown while allocations are still outstanding. */
	if (drm_mm_clean(mm)) {
		drm_mm_takedown(mm);
		spin_unlock(&rman->lock);
		kfree(rman);
		man->priv = NULL;
		return 0;
	}
	spin_unlock(&rman->lock);
	return -EBUSY;
}
static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
			     const char *prefix)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	spin_lock(&rman->lock);
	drm_mm_debug_table(&rman->mm, prefix);
	spin_unlock(&rman->lock);
}
const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
	.init = ttm_bo_man_init,
	.takedown = ttm_bo_man_takedown,
	.get_node = ttm_bo_man_get_node,
	.put_node = ttm_bo_man_put_node,
	.debug = ttm_bo_man_debug
};
EXPORT_SYMBOL(ttm_bo_manager_func);
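
/*
 * Usage note: a minimal sketch of how a driver would plug this manager
 * in, from its ttm_bo_driver::init_mem_type() callback. The function
 * name mydrv_init_mem_type and the choice of TTM_PL_VRAM with
 * fixed/write-combined flags are illustrative assumptions, not part of
 * this file; see the in-tree radeon and nouveau drivers for real uses.
 *
 *	static int mydrv_init_mem_type(struct ttm_bo_device *bdev,
 *				       uint32_t type,
 *				       struct ttm_mem_type_manager *man)
 *	{
 *		switch (type) {
 *		case TTM_PL_VRAM:
 *			man->func = &ttm_bo_manager_func;
 *			man->flags = TTM_MEMTYPE_FLAG_FIXED |
 *				     TTM_MEMTYPE_FLAG_MAPPABLE;
 *			man->available_caching = TTM_PL_FLAG_UNCACHED |
 *						 TTM_PL_FLAG_WC;
 *			man->default_caching = TTM_PL_FLAG_WC;
 *			break;
 *		default:
 *			return -EINVAL;
 *		}
 *		return 0;
 *	}
 */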