// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include "rxe.h"
#include "rxe_loc.h"

/* info about object pools
 * note that mr and mw share a single index space
 * so that one can map an lkey to the correct type of object
 */
struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
	[RXE_TYPE_UC] = {
		.name		= "rxe-uc",
		.size		= sizeof(struct rxe_ucontext),
		.elem_offset	= offsetof(struct rxe_ucontext, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_PD] = {
		.name		= "rxe-pd",
		.size		= sizeof(struct rxe_pd),
		.elem_offset	= offsetof(struct rxe_pd, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_AH] = {
		.name		= "rxe-ah",
		.size		= sizeof(struct rxe_ah),
		.elem_offset	= offsetof(struct rxe_ah, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
	},
	[RXE_TYPE_SRQ] = {
		.name		= "rxe-srq",
		.size		= sizeof(struct rxe_srq),
		.elem_offset	= offsetof(struct rxe_srq, pelem),
		.flags		= RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
		.min_index	= RXE_MIN_SRQ_INDEX,
		.max_index	= RXE_MAX_SRQ_INDEX,
	},
	[RXE_TYPE_QP] = {
		.name		= "rxe-qp",
		.size		= sizeof(struct rxe_qp),
		.elem_offset	= offsetof(struct rxe_qp, pelem),
		.cleanup	= rxe_qp_cleanup,
		.flags		= RXE_POOL_INDEX,
		.min_index	= RXE_MIN_QP_INDEX,
		.max_index	= RXE_MAX_QP_INDEX,
	},
	[RXE_TYPE_CQ] = {
		.name		= "rxe-cq",
		.size		= sizeof(struct rxe_cq),
		.elem_offset	= offsetof(struct rxe_cq, pelem),
		.flags		= RXE_POOL_NO_ALLOC,
		.cleanup	= rxe_cq_cleanup,
	},
	[RXE_TYPE_MR] = {
		.name		= "rxe-mr",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.cleanup	= rxe_mem_cleanup,
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MR_INDEX,
		.min_index	= RXE_MIN_MR_INDEX,
	},
	[RXE_TYPE_MW] = {
		.name		= "rxe-mw",
		.size		= sizeof(struct rxe_mem),
		.elem_offset	= offsetof(struct rxe_mem, pelem),
		.flags		= RXE_POOL_INDEX,
		.max_index	= RXE_MAX_MW_INDEX,
		.min_index	= RXE_MIN_MW_INDEX,
	},
	[RXE_TYPE_MC_GRP] = {
		.name		= "rxe-mc_grp",
		.size		= sizeof(struct rxe_mc_grp),
		.elem_offset	= offsetof(struct rxe_mc_grp, pelem),
		.cleanup	= rxe_mc_cleanup,
		.flags		= RXE_POOL_KEY,
		.key_offset	= offsetof(struct rxe_mc_grp, mgid),
		.key_size	= sizeof(union ib_gid),
	},
	[RXE_TYPE_MC_ELEM] = {
		.name		= "rxe-mc_elem",
		.size		= sizeof(struct rxe_mc_elem),
		.elem_offset	= offsetof(struct rxe_mc_elem, pelem),
	},
};
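
/*
 * Illustration of the shared mr/mw index space (a sketch, assuming only
 * the index ranges above): RXE_MIN_MR_INDEX..RXE_MAX_MR_INDEX and
 * RXE_MIN_MW_INDEX..RXE_MAX_MW_INDEX do not overlap, so the index
 * recovered from an lkey falls in at most one range, and a lookup such as
 *
 *	struct rxe_mem *mem = rxe_pool_get_index(&rxe->mr_pool, index);
 *
 * can only find an object of the type that owns that range.
 */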

static inline const char *pool_name(struct rxe_pool *pool)
{
	return rxe_type_info[pool->type].name;
}

static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
	int err = 0;
	size_t size;

	if ((max - min + 1) < pool->max_elem) {
		pr_warn("not enough indices for max_elem\n");
		err = -EINVAL;
		goto out;
	}

	pool->index.max_index = max;
	pool->index.min_index = min;

	size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
	pool->index.table = kmalloc(size, GFP_KERNEL);
	if (!pool->index.table) {
		err = -ENOMEM;
		goto out;
	}

	pool->index.table_size = size;
	bitmap_zero(pool->index.table, max - min + 1);

out:
	return err;
}

int rxe_pool_init(
	struct rxe_dev		*rxe,
	struct rxe_pool		*pool,
	enum rxe_elem_type	type,
	unsigned int		max_elem)
{
	int			err = 0;
	size_t			size = rxe_type_info[type].size;

	memset(pool, 0, sizeof(*pool));

	pool->rxe		= rxe;
	pool->type		= type;
	pool->max_elem		= max_elem;
	pool->elem_size		= ALIGN(size, RXE_POOL_ALIGN);
	pool->flags		= rxe_type_info[type].flags;
	pool->index.tree	= RB_ROOT;
	pool->key.tree		= RB_ROOT;
	pool->cleanup		= rxe_type_info[type].cleanup;

	atomic_set(&pool->num_elem, 0);

	rwlock_init(&pool->pool_lock);

	if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
		err = rxe_pool_init_index(pool,
					  rxe_type_info[type].max_index,
					  rxe_type_info[type].min_index);
		if (err)
			goto out;
	}

	if (rxe_type_info[type].flags & RXE_POOL_KEY) {
		pool->key.key_offset = rxe_type_info[type].key_offset;
		pool->key.key_size = rxe_type_info[type].key_size;
	}

out:
	return err;
}
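
/*
 * Typical use (a sketch of the device-init path): one pool is set up per
 * object type, sized from the device attributes, e.g.
 *
 *	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
 *			    rxe->attr.max_qp);
 *	if (err)
 *		goto err;
 */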

void rxe_pool_cleanup(struct rxe_pool *pool)
{
	if (atomic_read(&pool->num_elem) > 0)
		pr_warn("%s pool destroyed with unfree'd elem\n",
			pool_name(pool));

	kfree(pool->index.table);
}
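
/* Caller must hold pool->pool_lock. Finds the first free index after the
 * most recently allocated one, wrapping to the start of the bitmap when
 * the end of the range is reached.
 */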
static u32 alloc_index(struct rxe_pool *pool)
{
	u32 index;
	u32 range = pool->index.max_index - pool->index.min_index + 1;

	index = find_next_zero_bit(pool->index.table, range, pool->index.last);
	if (index >= range)
		index = find_first_zero_bit(pool->index.table, range);

	WARN_ON_ONCE(index >= range);
	set_bit(index, pool->index.table);
	pool->index.last = index;

	return index + pool->index.min_index;
}
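
/* Caller must hold pool->pool_lock */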
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->index.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, index_node);

		if (elem->index == new->index) {
			pr_warn("element already exists!\n");
			goto out;
		}

		if (elem->index > new->index)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->index_node, parent, link);
	rb_insert_color(&new->index_node, &pool->index.tree);
out:
	return;
}
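
/* Caller must hold pool->pool_lock */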
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
	struct rb_node **link = &pool->key.tree.rb_node;
	struct rb_node *parent = NULL;
	struct rxe_pool_entry *elem;
	int cmp;

	while (*link) {
		parent = *link;
		elem = rb_entry(parent, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     (u8 *)new + pool->key.key_offset,
			     pool->key.key_size);

		if (cmp == 0) {
			pr_warn("key already exists!\n");
			goto out;
		}

		if (cmp > 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->key_node, parent, link);
	rb_insert_color(&new->key_node, &pool->key.tree);
out:
	return;
}

void __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;

	memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
	insert_key(pool, elem);
}

void __rxe_add_key(struct rxe_pool_entry *elem, void *key)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_key_locked(elem, key);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	rb_erase(&elem->key_node, &pool->key.tree);
}

void __rxe_drop_key(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_key_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}
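
/*
 * Key interface sketch (illustrative; callers normally go through the
 * rxe_add_key()/rxe_drop_key() wrappers in rxe_pool.h, which pass
 * &(obj)->pelem): a multicast group is keyed on its mgid, so inserting
 * one looks roughly like
 *
 *	rxe_add_key(grp, &mgid);
 */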

void __rxe_add_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	elem->index = alloc_index(pool);
	insert_index(pool, elem);
}

void __rxe_add_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_add_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;

	clear_bit(elem->index - pool->index.min_index, pool->index.table);
	rb_erase(&elem->index_node, &pool->index.tree);
}

void __rxe_drop_index(struct rxe_pool_entry *elem)
{
	struct rxe_pool *pool = elem->pool;
	unsigned long flags;

	write_lock_irqsave(&pool->pool_lock, flags);
	__rxe_drop_index_locked(elem);
	write_unlock_irqrestore(&pool->pool_lock, flags);
}

void *rxe_alloc_locked(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_ATOMIC);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}
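
/* Same as rxe_alloc_locked() except the caller does not hold
 * pool->pool_lock, so the allocation can use GFP_KERNEL and sleep.
 */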
void *rxe_alloc(struct rxe_pool *pool)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rxe_pool_entry *elem;
	u8 *obj;

	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	obj = kzalloc(info->size, GFP_KERNEL);
	if (!obj)
		goto out_cnt;

	elem = (struct rxe_pool_entry *)(obj + info->elem_offset);

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return obj;

out_cnt:
	atomic_dec(&pool->num_elem);
	return NULL;
}

int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
	if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
		goto out_cnt;

	elem->pool = pool;
	kref_init(&elem->ref_cnt);

	return 0;

out_cnt:
	atomic_dec(&pool->num_elem);
	return -EINVAL;
}
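
/*
 * Sketch of the RXE_POOL_NO_ALLOC path (illustrative; callers normally
 * use the rxe_add_to_pool() wrapper in rxe_pool.h): for types whose
 * memory is allocated by ib_core along with the uverbs object, only the
 * pool bookkeeping is done here, e.g.
 *
 *	err = rxe_add_to_pool(&rxe->pd_pool, pd);
 */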

void rxe_elem_release(struct kref *kref)
{
	struct rxe_pool_entry *elem =
		container_of(kref, struct rxe_pool_entry, ref_cnt);
	struct rxe_pool *pool = elem->pool;
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	u8 *obj;

	if (pool->cleanup)
		pool->cleanup(elem);

	if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
		obj = (u8 *)elem - info->elem_offset;
		kfree(obj);
	}

	atomic_dec(&pool->num_elem);
}

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);

	node = pool->index.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, index_node);

		if (elem->index > index)
			node = node->rb_left;
		else if (elem->index < index)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
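
/*
 * Illustrative index lookup (a sketch): the packet path can resolve a
 * destination qpn with
 *
 *	struct rxe_qp *qp = rxe_pool_get_index(&rxe->qp_pool, qpn);
 *
 * A non-NULL return carries a reference taken under pool_lock, which the
 * caller must release with rxe_drop_ref() when it is done with the qp.
 */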

void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
	struct rxe_type_info *info = &rxe_type_info[pool->type];
	struct rb_node *node;
	struct rxe_pool_entry *elem;
	u8 *obj;
	int cmp;

	node = pool->key.tree.rb_node;

	while (node) {
		elem = rb_entry(node, struct rxe_pool_entry, key_node);

		cmp = memcmp((u8 *)elem + pool->key.key_offset,
			     key, pool->key.key_size);

		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			break;
	}

	if (node) {
		kref_get(&elem->ref_cnt);
		obj = (u8 *)elem - info->elem_offset;
	} else {
		obj = NULL;
	}

	return obj;
}

void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
	u8 *obj;
	unsigned long flags;

	read_lock_irqsave(&pool->pool_lock, flags);
	obj = rxe_pool_get_key_locked(pool, key);
	read_unlock_irqrestore(&pool->pool_lock, flags);

	return obj;
}
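
/*
 * Illustrative key lookup (a sketch): multicast code can resolve a group
 * from its mgid with
 *
 *	struct rxe_mc_grp *grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
 *
 * As with index lookups, a successful return carries a reference that the
 * caller must drop with rxe_drop_ref().
 */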