// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
 * Copyright 2007-2010 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
# include "vmwgfx_drv.h"
2012-10-02 18:01:07 +01:00
# include <drm/ttm/ttm_module.h>
# include <drm/ttm/ttm_bo_driver.h>
# include <drm/ttm/ttm_placement.h>
2010-10-26 21:21:47 +02:00
# include <linux/idr.h>
# include <linux/spinlock.h>
# include <linux/kernel.h>
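/*
 * struct vmwgfx_gmrid_man - per-type (GMR or MOB) id manager.
 *
 * The ida hands out ids up to max_gmr_ids, while used_gmr_pages tracks the
 * pages consumed by current allocations against max_gmr_pages; the spinlock
 * protects the page accounting.
 */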
struct vmwgfx_gmrid_man {
	struct ttm_resource_manager manager;
	spinlock_t lock;
	struct ida gmr_ida;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t used_gmr_pages;
};

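/* Map the embedded ttm_resource_manager back to its vmwgfx_gmrid_man. */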
static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmwgfx_gmrid_man, manager);
}

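/*
 * vmw_gmrid_man_get_node - allocate a GMR id and charge the buffer's pages
 * against the manager's page budget.
 *
 * Exhausting the id space is not a hard error: the node is simply left
 * unallocated. Exceeding max_gmr_pages backs out the allocation and
 * returns -ENOSPC.
 */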
static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
				  struct ttm_buffer_object *bo,
				  const struct ttm_place *place,
				  struct ttm_resource *mem)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
	int id;

	id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
	if (id < 0)
		return (id != -ENOMEM ? 0 : id);

	spin_lock(&gman->lock);

	if (gman->max_gmr_pages > 0) {
		gman->used_gmr_pages += bo->num_pages;
		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
			goto nospace;
	}

	mem->mm_node = gman;
	mem->start = id;
	mem->num_pages = bo->num_pages;

	spin_unlock(&gman->lock);
	return 0;

nospace:
	gman->used_gmr_pages -= bo->num_pages;
	spin_unlock(&gman->lock);
	ida_free(&gman->gmr_ida, id);
	return -ENOSPC;
}

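/*
 * vmw_gmrid_man_put_node - release the GMR id and page budget reserved by
 * vmw_gmrid_man_get_node, if the node was actually allocated.
 */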
static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
				   struct ttm_resource *mem)
{
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	if (mem->mm_node) {
		ida_free(&gman->gmr_ida, mem->start);
		spin_lock(&gman->lock);
		gman->used_gmr_pages -= mem->num_pages;
		spin_unlock(&gman->lock);
		mem->mm_node = NULL;
	}
}

static const struct ttm_resource_manager_func vmw_gmrid_manager_func;

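/*
 * vmw_gmrid_man_init - allocate a GMR id manager for the given memory type
 * (VMW_PL_GMR or VMW_PL_MOB), register it with TTM and mark it used.
 */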
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man;
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(!gman))
		return -ENOMEM;

	man = &gman->manager;

	man->func = &vmw_gmrid_manager_func;
	man->available_caching = TTM_PL_FLAG_CACHED;
	man->default_caching = TTM_PL_FLAG_CACHED;
	/* TODO: This is most likely not correct */
	man->use_tt = true;
	ttm_resource_manager_init(man, 0);
	spin_lock_init(&gman->lock);
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);

	switch (type) {
	case VMW_PL_GMR:
		gman->max_gmr_ids = dev_priv->max_gmr_ids;
		gman->max_gmr_pages = dev_priv->max_gmr_pages;
		break;
	case VMW_PL_MOB:
		gman->max_gmr_ids = VMWGFX_NUM_MOB;
		gman->max_gmr_pages = dev_priv->max_mob_pages;
		break;
	default:
		BUG();
	}
	ttm_set_driver_manager(&dev_priv->bdev, type, &gman->manager);
	ttm_resource_manager_set_used(man, true);
	return 0;
}

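/*
 * vmw_gmrid_man_fini - disable the manager, force-clean any remaining
 * resources, and free the id allocator and manager state.
 */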
void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, type);
	struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);

	ttm_resource_manager_set_used(man, false);

	ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);

	ttm_resource_manager_cleanup(man);

	ttm_set_driver_manager(&dev_priv->bdev, type, NULL);
	ida_destroy(&gman->gmr_ida);
	kfree(gman);
}

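/* TTM resource manager callbacks for the GMR/MOB id managers. */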
static const struct ttm_resource_manager_func vmw_gmrid_manager_func = {
	.alloc = vmw_gmrid_man_get_node,
	.free = vmw_gmrid_man_put_node,
};