/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define HEM_INDEX_BUF	BIT(0)
#define HEM_INDEX_L0	BIT(1)
#define HEM_INDEX_L1	BIT(2)
struct hns_roce_hem_index {
	u64 buf;
	u64 l0;
	u64 l1;
	u32 inited; /* indicate which index is available */
};
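
/*
 * hns_roce_check_whether_mhop - report whether a HEM table of @type uses
 * multi-hop addressing, i.e. whether its entries are reached through one
 * or more levels of base address tables (BTs) instead of one flat chunk.
 * Returns true when the configured hop number for @type is non-zero.
 */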
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	int hop_num = 0;

	switch (type) {
	case HEM_TYPE_QPC:
		hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SRQC:
		hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_GMV:
		hop_num = hr_dev->caps.gmv_hop_num;
		break;
	default:
		return false;
	}

	return hop_num;
}
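
/*
 * The two helpers below report whether every other slot sharing one BT
 * chunk with @hem_idx/@ba_idx is empty; if so, the parent-level entry
 * can also be torn down when this HEM is released.
 */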
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
				    u32 bt_chunk_num, u64 hem_max_num)
{
	u64 start_idx = round_down(hem_idx, bt_chunk_num);
	u64 check_max_num = start_idx + bt_chunk_num;
	u64 i;

	for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
		if (i != hem_idx && hem[i])
			return false;

	return true;
}
static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
{
	u64 start_idx = round_down(ba_idx, bt_chunk_num);
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (i != ba_idx && bt[start_idx + i])
			return false;

	return true;
}
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}
static int get_hem_table_config(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_mhop *mhop,
				u32 type)
{
	struct device *dev = hr_dev->dev;

	switch (type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
		mhop->hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_GMV:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
		mhop->hop_num = hr_dev->caps.gmv_hop_num;
		break;
	default:
		dev_err(dev, "table %u does not support multi-hop addressing!\n",
			type);
		return -EINVAL;
	}

	return 0;
}
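
/*
 * A worked example for the index decomposition below, assuming (for
 * illustration only) a 4 KB bt_chunk_size and 8-byte BAs, so
 * chunk_ba_num = 512: in a 3-hop table, table_idx = 0x12345 splits into
 * l2_idx = 0x145, l1_idx = 0x91 and l0_idx = 0, since
 * table_idx = (l0_idx * 512 + l1_idx) * 512 + l2_idx.
 */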
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 chunk_size;
	u32 table_idx;
	u32 bt_num;

	if (get_hem_table_config(hr_dev, mhop, table->type))
		return -EINVAL;

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
	 * MTT/CQE alloc hem for bt pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = *obj / (chunk_size / table->obj_size);
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "table %u does not support hop_num = %u!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}
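
/*
 * Allocate a HEM made up of one or more DMA-coherent chunks. Each loop
 * iteration grabs the largest power-of-two block that still fits the
 * remaining page count; on failure everything allocated so far is freed
 * and NULL is returned, with no retry at a smaller order.
 */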
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Allocate the memory in one shot. If that fails, don't
		 * fall back to smaller blocks; return failure directly.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
					  sg_dma_len(&chunk->mem[i]),
					  chunk->buf[i],
					  sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}
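
/*
 * Translate an object id into the flat indexes (index->buf/l0/l1) used
 * by the table->hem[] and table->bt_lx[] arrays, according to how many
 * BT levels the table type uses.
 */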
static int calc_hem_config(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long mhop_obj = obj;
	u32 l0_idx, l1_idx, l2_idx;
	u32 chunk_ba_num;
	u32 bt_num;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
	if (ret)
		return ret;

	l0_idx = mhop->l0_idx;
	l1_idx = mhop->l1_idx;
	l2_idx = mhop->l2_idx;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	switch (bt_num) {
	case 3:
		index->l1 = l0_idx * chunk_ba_num + l1_idx;
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
			     l1_idx * chunk_ba_num + l2_idx;
		break;
	case 2:
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num + l1_idx;
		break;
	case 1:
		index->buf = l0_idx;
		break;
	default:
		ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
			  table->type, mhop->hop_num);
		return -EINVAL;
	}

	if (unlikely(index->buf >= table->num_hem)) {
		ibdev_err(ibdev, "table %u exceeds hem limit, idx %llu, max %lu!\n",
			  table->type, index->buf, table->num_hem);
		return -EINVAL;
	}

	return 0;
}
static void free_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;

	if (index->inited & HEM_INDEX_BUF) {
		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
		table->hem[index->buf] = NULL;
	}

	if (index->inited & HEM_INDEX_L1) {
		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
				  table->bt_l1_dma_addr[index->l1]);
		table->bt_l1[index->l1] = NULL;
	}

	if (index->inited & HEM_INDEX_L0) {
		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
				  table->bt_l0_dma_addr[index->l0]);
		table->bt_l0[index->l0] = NULL;
	}
}
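
/*
 * Allocate any missing BT levels plus the buffer HEM for one object,
 * recording each newly created piece in index->inited so that the error
 * path (and later teardown) frees exactly what was created here. Each
 * new lower-level table is linked into its parent by writing its DMA
 * address into the parent's BA slot.
 */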
static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	gfp_t flag;
	u64 bt_ba;
	u32 size;
	int ret;

	/* alloc L1 BA's chunk */
	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
	     !table->bt_l0[index->l0]) {
		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l0_dma_addr[index->l0],
					    GFP_KERNEL);
		if (!table->bt_l0[index->l0]) {
			ret = -ENOMEM;
			goto out;
		}
		index->inited |= HEM_INDEX_L0;
	}

	/* alloc L2 BA's chunk */
	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
	    !table->bt_l1[index->l1]) {
		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l1_dma_addr[index->l1],
					    GFP_KERNEL);
		if (!table->bt_l1[index->l1]) {
			ret = -ENOMEM;
			goto err_alloc_hem;
		}
		index->inited |= HEM_INDEX_L1;
		*(table->bt_l0[index->l0] + mhop->l1_idx) =
					       table->bt_l1_dma_addr[index->l1];
	}

	/*
	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
	 * alloc bt space chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
	flag = GFP_KERNEL | __GFP_NOWARN;
	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
						    size, flag);
	if (!table->hem[index->buf]) {
		ret = -ENOMEM;
		goto err_alloc_hem;
	}

	index->inited |= HEM_INDEX_BUF;
	hns_roce_hem_first(table->hem[index->buf], &iter);
	bt_ba = hns_roce_hem_addr(&iter);
	if (table->type < HEM_TYPE_MTT) {
		if (mhop->hop_num == 2)
			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
		else if (mhop->hop_num == 1)
			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	} else if (mhop->hop_num == 2) {
		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	}

	return 0;

err_alloc_hem:
	free_mhop_hem(hr_dev, table, mhop, index);
out:
	return ret;
}
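
/* Program each newly initialized BT/BUF base address into the hardware. */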
static int set_mhop_hem(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj,
			struct hns_roce_hem_mhop *mhop,
			struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 step_idx;
	int ret = 0;

	if (index->inited & HEM_INDEX_L0) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 0 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_L1) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 1 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_BUF) {
		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = mhop->hop_num;
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
		if (ret)
			ibdev_err(ibdev, "set HEM step last failed!\n");
	}
out:
	return ret;
}
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return ret;
	}

	mutex_lock(&table->mutex);
	if (table->hem[index.buf]) {
		refcount_inc(&table->hem[index.buf]->refcount);
		goto out;
	}

	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "alloc mhop hem failed!\n");
		goto out;
	}

	/* set HEM base address to hardware */
	if (table->type < HEM_TYPE_MTT) {
		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
		if (ret) {
			ibdev_err(ibdev, "set HEM address to HW failed!\n");
			goto err_alloc;
		}
	}

	refcount_set(&table->hem[index.buf]->refcount, 1);
	goto out;

err_alloc:
	free_mhop_hem(hr_dev, table, &mhop, &index);
out:
	mutex_unlock(&table->mutex);

	return ret;
}
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;
	int ret = 0;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = obj / (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		refcount_inc(&table->hem[i]->refcount);
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       GFP_KERNEL | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set HEM base address(128K/page, pa) to Hardware */
	ret = hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
	if (ret) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		dev_err(dev, "set HEM base address to HW failed, ret = %d.\n",
			ret);
		goto out;
	}

	refcount_set(&table->hem[i]->refcount, 1);
out:
	mutex_unlock(&table->mutex);
	return ret;
}
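
/*
 * Work out which BT levels become empty once this HEM goes away (by
 * checking the sibling slots) and ask the hardware to drop the matching
 * base addresses, from the bottom level upwards.
 */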
static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 hop_num = mhop->hop_num;
	u32 chunk_ba_num;
	u32 step_idx;
	int ret;

	index->inited = HEM_INDEX_BUF;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	if (check_whether_bt_num_2(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem))
			index->inited |= HEM_INDEX_L0;
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem)) {
			index->inited |= HEM_INDEX_L1;
			if (hns_roce_check_bt_null(table->bt_l1, index->l1,
						   chunk_ba_num))
				index->inited |= HEM_INDEX_L0;
		}
	}

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = hop_num;

		ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx);
		if (ret)
			ibdev_warn(ibdev, "failed to clear hop%u HEM, ret = %d.\n",
				   hop_num, ret);

		if (index->inited & HEM_INDEX_L1) {
			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 1);
			if (ret)
				ibdev_warn(ibdev, "failed to clear HEM step 1, ret = %d.\n",
					   ret);
		}

		if (index->inited & HEM_INDEX_L0) {
			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
			if (ret)
				ibdev_warn(ibdev, "failed to clear HEM step 0, ret = %d.\n",
					   ret);
		}
	}
}
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return;
	}

	if (!check_refcount)
		mutex_lock(&table->mutex);
	else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
					      &table->mutex))
		return;

	clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
	free_mhop_hem(hr_dev, table, &mhop, &index);

	mutex_unlock(&table->mutex);
}
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;
	int ret;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = obj / (table->table_chunk_size / table->obj_size);

	if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
					 &table->mutex))
		return;

	ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT);
	if (ret)
		dev_warn(dev, "failed to clear HEM base address, ret = %d.\n",
			 ret);

	hns_roce_free_hem(hr_dev, table->hem[i]);
	table->hem[i] = NULL;

	mutex_unlock(&table->mutex);
}
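
/*
 * Return the kernel virtual address of @obj within the table and,
 * optionally, its DMA address through @dma_handle, by walking the
 * scatterlist of the chunk that holds the object.
 */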
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	void *addr = NULL;
	u32 hem_idx = 0;
	int length;
	int i, j;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[obj / obj_per_chunk];
		idx_offset = obj % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */

		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
			goto out;
		/* mtt mhop */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}
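
/*
 * Size and allocate the bookkeeping arrays (table->hem[] and, for
 * multi-hop tables, bt_lx[]/bt_lx_dma_addr[]) from the chunk geometry;
 * no HEM memory is allocated here, that happens on first table_get().
 */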
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj)
{
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		struct hns_roce_hem_mhop mhop = {};
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0;
		u32 hop_num;

		if (get_hem_table_config(hr_dev, &mhop, type))
			return -EINVAL;

		buf_chunk_size = mhop.buf_chunk_size;
		bt_chunk_size = mhop.bt_chunk_size;
		num_bt_l0 = mhop.ba_l0_num;
		hop_num = mhop.hop_num;

		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;

		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						sizeof(*table->bt_l1_dma_addr),
						GFP_KERNEL);
			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0,
					       sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						sizeof(*table->bt_l0_dma_addr),
						GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->obj_size = obj_size;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	u64 obj;
	int i;

	if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
		return;
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
					mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;
	int obj;
	int ret;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			obj = i * table->table_chunk_size / table->obj_size;
			ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0);
			if (ret)
				dev_err(dev, "clear HEM base address failed, ret = %d.\n",
					ret);

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->cqc_timer_table);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

	if (hr_dev->caps.gmv_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);

	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
}
struct hns_roce_hem_item {
	struct list_head list; /* link all hems in the same bt level */
	struct list_head sibling; /* link all hems in last hop for mtt */
	void *addr;
	dma_addr_t dma_addr;
	size_t count; /* max ba numbers */
	int start; /* start buf offset in this hem */
	int end; /* end buf offset in this hem */
};

/* All HEM items are linked in a tree structure */
struct hns_roce_hem_head {
	struct list_head branch[HNS_ROCE_MAX_BT_REGION];
	struct list_head root;
	struct list_head leaf;
};
static struct hns_roce_hem_item *
hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
		    bool exist_bt)
{
	struct hns_roce_hem_item *hem;

	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
	if (!hem)
		return NULL;

	if (exist_bt) {
		hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
					       &hem->dma_addr, GFP_KERNEL);
		if (!hem->addr) {
			kfree(hem);
			return NULL;
		}
	}

	hem->count = count;
	hem->start = start;
	hem->end = end;
	INIT_LIST_HEAD(&hem->list);
	INIT_LIST_HEAD(&hem->sibling);

	return hem;
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_item *hem, bool exist_bt)
{
	if (exist_bt)
		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
				  hem->addr, hem->dma_addr);
	kfree(hem);
}

static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
{
	struct hns_roce_hem_item *hem, *temp_hem;

	list_for_each_entry_safe(hem, temp_hem, head, list) {
		list_del(&hem->list);
		hem_list_free_item(hr_dev, hem, exist_bt);
	}
}
static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
			     u64 table_addr)
{
	*(u64 *)(base_addr) = table_addr;
}

/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_item *hem, void *cpu_addr,
			       u64 phy_addr)
{
	hem->addr = cpu_addr;
	hem->dma_addr = (dma_addr_t)phy_addr;
}

static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
					     int offset)
{
	return (hem->start <= offset && offset <= hem->end);
}
static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
						      int page_offset)
{
	struct hns_roce_hem_item *hem, *temp_hem;
	struct hns_roce_hem_item *found = NULL;

	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
		if (hem_list_page_is_in_range(hem, page_offset)) {
			found = hem;
			break;
		}
	}

	return found;
}

static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
{
	/*
	 * hopnum    base address table levels
	 * 0         L0(buf)
	 * 1         L0 -> buf
	 * 2         L0 -> L1 -> buf
	 * 3         L0 -> L1 -> L2 -> buf
	 */
	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
}
/*
 * calc base address entries num
 * @hopnum: num of multi-hop addressing
 * @bt_level: base address table level
 * @unit: ba entries per bt page
 */
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
	u32 step;
	int max;
	int i;

	if (hopnum <= bt_level)
		return 0;
	/*
	 * hopnum  bt_level  range
	 * 1       0         unit
	 * ------------
	 * 2       0         unit * unit
	 * 2       1         unit
	 * ------------
	 * 3       0         unit * unit * unit
	 * 3       1         unit * unit
	 * 3       2         unit
	 */
	step = 1;
	max = hopnum - bt_level;
	for (i = 0; i < max; i++)
		step = step * unit;

	return step;
}
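
/*
 * Example for hem_list_calc_ba_range(), with assumed (illustrative)
 * values: for unit = 512 (one 4 KB BT page of 8-byte BAs) and hopnum = 2,
 * each level-0 BA covers 512 * 512 = 262144 bottom-level offsets, while
 * each level-1 BA covers 512.
 */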
/*
 * calc the root ba entries which could cover all regions
 * @regions: buf region array
 * @region_cnt: array size of @regions
 * @unit: ba entries per bt page
 */
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit)
{
	struct hns_roce_buf_region *r;
	int total = 0;
	int step;
	int i;

	for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
		if (r->hopnum > 1) {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step > 0)
				total += (r->count + step - 1) / step;
		} else {
			total += r->count;
		}
	}

	return total;
}
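
/*
 * Build the level-1 .. level-(hopnum - 1) tables that cover @offset
 * within region @r, reusing any that already exist in @mid_bt, and
 * record the bottom-level items on @btm_bt.
 */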
static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
				 const struct hns_roce_buf_region *r, int unit,
				 int offset, struct list_head *mid_bt,
				 struct list_head *btm_bt)
{
	struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
	struct hns_roce_hem_item *cur, *pre;
	const int hopnum = r->hopnum;
	int start_aligned;
	int distance;
	int ret = 0;
	int max_ofs;
	int level;
	u32 step;
	int end;

	if (hopnum <= 1)
		return 0;

	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
		return -EINVAL;
	}

	if (offset < r->offset) {
		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
			offset, r->offset);
		return -EINVAL;
	}

	distance = offset - r->offset;
	max_ofs = r->offset + r->count - 1;
	for (level = 0; level < hopnum; level++)
		INIT_LIST_HEAD(&temp_list[level]);

	/* config L1 bt to last bt and link them to corresponding parent */
	for (level = 1; level < hopnum; level++) {
		cur = hem_list_search_item(&mid_bt[level], offset);
		if (cur) {
			hem_ptrs[level] = cur;
			continue;
		}

		step = hem_list_calc_ba_range(hopnum, level, unit);
		if (step < 1) {
			ret = -EINVAL;
			goto err_exit;
		}

		start_aligned = (distance / step) * step + r->offset;
		end = min_t(int, start_aligned + step - 1, max_ofs);
		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
					  true);
		if (!cur) {
			ret = -ENOMEM;
			goto err_exit;
		}
		hem_ptrs[level] = cur;
		list_add(&cur->list, &temp_list[level]);
		if (hem_list_is_bottom_bt(hopnum, level))
			list_add(&cur->sibling, &temp_list[0]);

		/* link bt to parent bt */
		if (level > 1) {
			pre = hem_ptrs[level - 1];
			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
			hem_list_link_bt(hr_dev, pre->addr + step,
					 cur->dma_addr);
		}
	}

	list_splice(&temp_list[0], btm_bt);
	for (level = 1; level < hopnum; level++)
		list_splice(&temp_list[level], &mid_bt[level]);

	return 0;

err_exit:
	for (level = 1; level < hopnum; level++)
		hem_list_free_all(hr_dev, &temp_list[level], true);

	return ret;
}
static struct hns_roce_hem_item *
alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
	       const struct hns_roce_buf_region *regions, int region_cnt)
{
	const struct hns_roce_buf_region *r;
	struct hns_roce_hem_item *hem;
	int ba_num;
	int offset;

	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
	if (ba_num < 1)
		return ERR_PTR(-ENOMEM);

	if (ba_num > unit)
		return ERR_PTR(-ENOBUFS);

	offset = regions[0].offset;
	/* indicate to last region */
	r = &regions[region_cnt - 1];
	hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
				  ba_num, true);
	if (!hem)
		return ERR_PTR(-ENOMEM);

	*max_ba_num = ba_num;

	return hem;
}
static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
			      u64 phy_base, const struct hns_roce_buf_region *r,
			      struct list_head *branch_head,
			      struct list_head *leaf_head)
{
	struct hns_roce_hem_item *hem;

	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
				  r->count, false);
	if (!hem)
		return -ENOMEM;

	hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
	list_add(&hem->list, branch_head);
	list_add(&hem->sibling, leaf_head);

	return r->count;
}
static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
			   int unit, const struct hns_roce_buf_region *r,
			   const struct list_head *branch_head)
{
	struct hns_roce_hem_item *hem, *temp_hem;
	int total = 0;
	int offset;
	int step;

	step = hem_list_calc_ba_range(r->hopnum, 1, unit);
	if (step < 1)
		return -EINVAL;

	/* if exist mid bt, link L1 to L0 */
	list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
		offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
		hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
		total++;
	}

	return total;
}
static int
setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
	       int unit, int max_ba_num, struct hns_roce_hem_head *head,
	       const struct hns_roce_buf_region *regions, int region_cnt)
{
	const struct hns_roce_buf_region *r;
	struct hns_roce_hem_item *root_hem;
	void *cpu_base;
	u64 phy_base;
	int i, total;
	int ret;

	root_hem = list_first_entry(&head->root,
				    struct hns_roce_hem_item, list);
	if (!root_hem)
		return -ENOMEM;

	total = 0;
	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		/* all regions' mid[x][0] share the root_bt's trunk */
		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;

		/* if hopnum is 0 or 1, cut a new fake hem from the root bt
		 * whose address is shared with all regions.
		 */
		if (hem_list_is_bottom_bt(r->hopnum, 0))
			ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
						 &head->branch[i], &head->leaf);
		else
			ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
					      &hem_list->mid_bt[i][1]);

		if (ret < 0)
			return ret;

		total += ret;
	}

	list_splice(&head->leaf, &hem_list->btm_bt);
	list_splice(&head->root, &hem_list->root_bt);
	for (i = 0; i < region_cnt; i++)
		list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);

	return 0;
}
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list, int unit,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt)
{
	struct hns_roce_hem_item *root_hem;
	struct hns_roce_hem_head head;
	int max_ba_num;
	int ret;
	int i;

	root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
	if (root_hem)
		return 0;

	max_ba_num = 0;
	root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
				  region_cnt);
	if (IS_ERR(root_hem))
		return PTR_ERR(root_hem);

	/* List head for storing all allocated HEM items */
	INIT_LIST_HEAD(&head.root);
	INIT_LIST_HEAD(&head.leaf);
	for (i = 0; i < region_cnt; i++)
		INIT_LIST_HEAD(&head.branch[i]);

	hem_list->root_ba = root_hem->dma_addr;
	list_add(&root_hem->list, &head.root);
	ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
			     region_cnt);
	if (ret) {
		for (i = 0; i < region_cnt; i++)
			hem_list_free_all(hr_dev, &head.branch[i], false);

		hem_list_free_all(hr_dev, &head.root, true);
	}

	return ret;
}
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt, unsigned int bt_pg_shift)
{
	const struct hns_roce_buf_region *r;
	int ofs, end;
	int unit;
	int ret;
	int i;

	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
			region_cnt);
		return -EINVAL;
	}

	unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		end = r->offset + r->count;
		for (ofs = r->offset; ofs < end; ofs += unit) {
			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
						    hem_list->mid_bt[i],
						    &hem_list->btm_bt);
			if (ret) {
				dev_err(hr_dev->dev,
					"alloc hem trunk fail ret = %d!\n",
					ret);
				goto err_alloc;
			}
		}
	}

	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
				     region_cnt);
	if (ret)
		dev_err(hr_dev->dev, "alloc hem root fail ret = %d!\n", ret);
	else
		return 0;

err_alloc:
	hns_roce_hem_list_release(hr_dev, hem_list);

	return ret;
}
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list)
{
	int i, j;

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
					  j != 0);

	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	hem_list->root_ba = 0;
}
void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
	int i, j;

	INIT_LIST_HEAD(&hem_list->root_bt);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
}
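
/*
 * Find the bottom-level BT covering @offset and return the CPU address
 * of its BA slot; *mtt_cnt reports how many consecutive entries remain
 * in that BT starting at @offset.
 */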
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt)
{
	struct list_head *head = &hem_list->btm_bt;
	struct hns_roce_hem_item *hem, *temp_hem;
	void *cpu_base = NULL;
	int nr = 0;

	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
		if (hem_list_page_is_in_range(hem, offset)) {
			nr = offset - hem->start;
			cpu_base = hem->addr + nr * BA_BYTE_LEN;
			nr = hem->end + 1 - offset;
			break;
		}
	}

	if (mtt_cnt)
		*mtt_cnt = nr;

	return cpu_base;
}