// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.flags		= RXE_POOL_ATOMIC,
	},
};
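
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * during device setup the driver sizes each pool from this table, e.g.
 *
 *	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
 *			    rxe->attr.max_qp);
 *
 * The field names rxe->qp_pool and rxe->attr.max_qp are assumed here
 * to match the conventions used elsewhere in the driver.
 */
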
static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}
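
/*
 * Size and allocate the bitmap used to track which indices in
 * [min, max] are in use. Fails if the index range cannot cover the
 * pool's max_elem.
 */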
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->max_index = max;
	pool->min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->table = kmalloc(size, GFP_KERNEL);
	if (!pool->table) {
		err = -ENOMEM;
		goto out;
	}

	pool->table_size = size;
	bitmap_zero(pool->table, max - min + 1);

out:
	return err;
}
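
/*
 * Initialize a pool of objects of the given type, capped at max_elem
 * elements, pulling the per-type parameters (size, flags, index range,
 * key layout, cleanup hook) from rxe_type_info[].
 */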
int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	kref_init(&pool->ref_cnt);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key_offset = rxe_type_info[type].key_offset;
		pool->key_size = rxe_type_info[type].key_size;
	}

	pool->state = RXE_POOL_STATE_VALID;

out:
	return err;
}
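
/*
 * The pool itself is reference counted: each live element holds a
 * reference, so the index bitmap is only freed once the last element
 * (and rxe_pool_cleanup()) have dropped theirs.
 */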
static void rxe_pool_release(struct kref *kref)
{
	struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);

	pool->state = RXE_POOL_STATE_INVALID;
	kfree(pool->table);
}

static void rxe_pool_put(struct rxe_pool *pool)
{
	kref_put(&pool->ref_cnt, rxe_pool_release);
}

2018-12-06 14:04:38 +03:00
void rxe_pool_cleanup ( struct rxe_pool * pool )
2016-06-16 16:45:23 +03:00
{
unsigned long flags ;
2018-08-27 08:44:14 +03:00
write_lock_irqsave ( & pool - > pool_lock , flags ) ;
2018-08-27 08:44:15 +03:00
pool - > state = RXE_POOL_STATE_INVALID ;
2016-06-16 16:45:23 +03:00
if ( atomic_read ( & pool - > num_elem ) > 0 )
pr_warn ( " %s pool destroyed with unfree'd elem \n " ,
pool_name ( pool ) ) ;
2018-08-27 08:44:14 +03:00
write_unlock_irqrestore ( & pool - > pool_lock , flags ) ;
2016-06-16 16:45:23 +03:00
rxe_pool_put ( pool ) ;
}
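
/*
 * Find a free index, starting the search just past the last index
 * handed out and wrapping back to the start of the bitmap if needed,
 * then mark it in use. The caller must hold pool->pool_lock.
 */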
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->max_index - pool->min_index + 1;

	index = find_next_zero_bit(pool->table, range, pool->last);
	if (index >= range)
		index = find_first_zero_bit(pool->table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->table);
	pool->last = index;
	return index + pool->min_index;
}
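
/*
 * Link a new element into the pool's rb-tree, ordered by index.
 * Duplicate indices are rejected with a warning. The caller must hold
 * pool->pool_lock.
 */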
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
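
/*
 * Link a new element into the pool's rb-tree, ordered by a memcmp() of
 * the key bytes stored at pool->key_offset within each element.
 * Duplicate keys are rejected with a warning. The caller must hold
 * pool->pool_lock.
 */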
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     (u8 *)new + pool->key_offset, pool->key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &pool->tree);
out:
	return;
}
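
/*
 * Copy the caller's key into the element and index it in the pool's
 * tree. For a multicast group pool, for example, a caller would pass
 * the group's mgid as the key, roughly (a sketch; the real call site
 * lives elsewhere in the driver):
 *
 *	rxe_add_key(grp, mgid);
 */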
void rxe_add_key(void *arg, void *key)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
	insert_key(pool, elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_key(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	rb_erase(&elem->node, &pool->tree);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_add_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	elem->index = alloc_index(pool);
	insert_index(pool, elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void rxe_drop_index(void *arg)
{
	struct rxe_pool_entry *elem = arg;
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	clear_bit(elem->index - pool->min_index, pool->table);
	rb_erase(&elem->node, &pool->tree);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}
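
/*
 * Allocate a new pool element. Takes a reference on the pool and on
 * the ib_device so neither can go away while the element lives, and
 * enforces the pool's max_elem limit. Returns NULL on any failure,
 * unwinding whatever references were already taken.
 */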
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_pool_entry *elem;
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	read_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != RXE_POOL_STATE_VALID) {
		read_unlock_irqrestore(&pool->pool_lock, flags);
		return NULL;
	}
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem = kzalloc(rxe_type_info[pool->type].size,
		       (pool->flags & RXE_POOL_ATOMIC) ?
		       GFP_ATOMIC : GFP_KERNEL);
	if (!elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return elem;

out_cnt:
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return NULL;
}
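
/*
 * Like rxe_alloc(), but for RXE_POOL_NO_ALLOC pools: the element is
 * embedded in an object that was already allocated by the caller
 * (typically by the ib_core layer), so only the bookkeeping is done
 * here. Returns 0 on success or -EINVAL on failure.
 */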
int rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	unsigned long flags;

	might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));

	read_lock_irqsave(&pool->pool_lock, flags);
	if (pool->state != RXE_POOL_STATE_VALID) {
		read_unlock_irqrestore(&pool->pool_lock, flags);
		return -EINVAL;
	}
	kref_get(&pool->ref_cnt);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	if (!ib_device_try_get(&pool->rxe->ib_dev))
		goto out_put_pool;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
out_put_pool:
	rxe_pool_put(pool);
	return -EINVAL;
}

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC))
		kfree(elem);
	atomic_dec(&pool->num_elem);
	ib_device_put(&pool->rxe->ib_dev);
	rxe_pool_put(pool);
}
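
/*
 * Look up an element by index. On a hit, the element's reference count
 * is bumped before the lock is released, so the caller owns a
 * reference and must drop it when done (via kref_put() on
 * elem->ref_cnt, conventionally wrapped by the driver's rxe_drop_ref()
 * helper).
 */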
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != RXE_POOL_STATE_VALID)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else {
			kref_get(&elem->ref_cnt);
			break;
		}
	}

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}
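
/*
 * Look up an element by key, comparing pool->key_size bytes at
 * pool->key_offset. As with rxe_pool_get_index(), a successful lookup
 * returns the element with its reference count already taken.
 */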
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	struct rb_node *node = NULL;
	struct rxe_pool_entry *elem = NULL;
	int cmp;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);

	if (pool->state != RXE_POOL_STATE_VALID)
		goto out;

	node = pool->tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, node);

		cmp = memcmp((u8 *)elem + pool->key_offset,
			     key, pool->key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node)
		kref_get(&elem->ref_cnt);

out:
	read_unlock_irqrestore(&pool->pool_lock, flags);
	return node ? elem : NULL;
}