/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
*
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
*
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_mm.h>

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>

/*
 * Currently we use a spinlock for the lock, but a mutex *may* be
 * more appropriate to reduce scheduling latency if the range manager
 * ends up with very fragmented allocation patterns.
 */
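
/**
 * struct ttm_range_manager - a ttm_resource_manager backed by a drm_mm
 * @manager: the embedded base resource-manager state
 * @mm: the address-space allocator handing out page-granular ranges
 * @lock: protects @mm against concurrent allocation, freeing and debug dumps
 */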
struct ttm_range_manager {
        struct ttm_resource_manager manager;
        struct drm_mm mm;
        spinlock_t lock;
};

static inline struct ttm_range_manager *
to_range_manager(struct ttm_resource_manager *man)
{
        return container_of(man, struct ttm_range_manager, manager);
}

static int ttm_range_man_alloc(struct ttm_resource_manager *man,
                               struct ttm_buffer_object *bo,
                               const struct ttm_place *place,
                               struct ttm_resource *mem)
{
        struct ttm_range_manager *rman = to_range_manager(man);
        struct drm_mm *mm = &rman->mm;
        struct drm_mm_node *node;
        enum drm_mm_insert_mode mode;
        unsigned long lpfn;
        int ret;

        /* An lpfn of zero means no upper limit: search the whole area. */
        lpfn = place->lpfn;
        if (!lpfn)
                lpfn = man->size;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        /* TOPDOWN placements want their range allocated from the top. */
        mode = DRM_MM_INSERT_BEST;
        if (place->flags & TTM_PL_FLAG_TOPDOWN)
                mode = DRM_MM_INSERT_HIGH;

        spin_lock(&rman->lock);
        ret = drm_mm_insert_node_in_range(mm, node,
                                          mem->num_pages,
                                          mem->page_alignment, 0,
                                          place->fpfn, lpfn, mode);
        spin_unlock(&rman->lock);

        if (unlikely(ret)) {
                kfree(node);
        } else {
                mem->mm_node = node;
                mem->start = node->start;
        }

        return ret;
}

static void ttm_range_man_free(struct ttm_resource_manager *man,
                               struct ttm_resource *mem)
{
        struct ttm_range_manager *rman = to_range_manager(man);

        if (mem->mm_node) {
                spin_lock(&rman->lock);
                drm_mm_remove_node(mem->mm_node);
                spin_unlock(&rman->lock);

                kfree(mem->mm_node);
                mem->mm_node = NULL;
        }
}

static const struct ttm_resource_manager_func ttm_range_manager_func;
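
/**
 * ttm_range_man_init - initialise a generic range manager for the
 * selected memory type
 * @bdev: the ttm device
 * @type: slot (memory type) the manager is installed into
 * @use_tt: whether allocations from this manager are TT-backed
 * @p_size: size of the area to be managed, in pages
 *
 * The new manager is installed in the @type slot of @bdev and marked
 * as used. Returns 0 on success or -ENOMEM on allocation failure.
 */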
int ttm_range_man_init(struct ttm_bo_device *bdev,
                       unsigned type, bool use_tt,
                       unsigned long p_size)
{
        struct ttm_resource_manager *man;
        struct ttm_range_manager *rman;

        rman = kzalloc(sizeof(*rman), GFP_KERNEL);
        if (!rman)
                return -ENOMEM;

        man = &rman->manager;
        man->use_tt = use_tt;
        man->func = &ttm_range_manager_func;

        ttm_resource_manager_init(man, p_size);

        drm_mm_init(&rman->mm, 0, p_size);
        spin_lock_init(&rman->lock);

        ttm_set_driver_manager(bdev, type, &rman->manager);
        ttm_resource_manager_set_used(man, true);
        return 0;
}
EXPORT_SYMBOL(ttm_range_man_init);
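
/*
 * Usage sketch (hypothetical driver code, not taken from this file):
 * a driver typically installs one range manager per page-granular
 * domain at device-init time, e.g. for a VRAM domain of vram_size
 * bytes (vram_size being the driver's own variable):
 *
 *	ret = ttm_range_man_init(bdev, TTM_PL_VRAM, false,
 *				 vram_size >> PAGE_SHIFT);
 *
 * and tears it down again with ttm_range_man_fini(bdev, TTM_PL_VRAM)
 * on device removal.
 */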
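
/**
 * ttm_range_man_fini - remove and tear down a generic range manager
 * @bdev: the ttm device
 * @type: slot (memory type) the manager was installed into
 *
 * First evicts any buffers still using the manager, then removes it
 * from the @type slot and frees it. Returns 0 on success or a negative
 * error code if eviction fails.
 */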
int ttm_range_man_fini(struct ttm_bo_device *bdev,
                       unsigned type)
{
        struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
        struct ttm_range_manager *rman = to_range_manager(man);
        struct drm_mm *mm = &rman->mm;
        int ret;

        ttm_resource_manager_set_used(man, false);

        ret = ttm_resource_manager_evict_all(bdev, man);
        if (ret)
                return ret;

        spin_lock(&rman->lock);
        drm_mm_clean(mm);
        drm_mm_takedown(mm);
        spin_unlock(&rman->lock);

        ttm_resource_manager_cleanup(man);
        ttm_set_driver_manager(bdev, type, NULL);
        kfree(rman);
        return 0;
}
EXPORT_SYMBOL(ttm_range_man_fini);

static void ttm_range_man_debug(struct ttm_resource_manager *man,
                                struct drm_printer *printer)
{
        struct ttm_range_manager *rman = to_range_manager(man);

        spin_lock(&rman->lock);
        drm_mm_print(&rman->mm, printer);
        spin_unlock(&rman->lock);
}

static const struct ttm_resource_manager_func ttm_range_manager_func = {
        .alloc = ttm_range_man_alloc,
        .free = ttm_range_man_free,
        .debug = ttm_range_man_debug
};