// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
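
/*
 * Dispatch an asynchronous event to the SRQ identified by @srqn. The SRQ is
 * looked up in the xarray under its lock and pinned with a reference so it
 * cannot be freed while the event callback runs.
 */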
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
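
/*
 * Translate a hardware SRQ event code into the corresponding IB event and
 * forward it to the consumer's event handler, if one is registered.
 */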
static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}
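
/* Post a CREATE_SRQ mailbox command carrying the SRQ context to hardware. */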
static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
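
/*
 * Post a DESTROY_SRQ mailbox command. When @mailbox is NULL, no output buffer
 * is supplied and the op modifier is set to 1 instead.
 */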
static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
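
/*
 * Allocate an SRQ number and SRQ context (SRQC): look up the DMA addresses of
 * the WQE buffer and the idx queue, reserve an SRQN from the bitmap, get an
 * SRQC table entry, publish the SRQ in the xarray, then write the context into
 * a mailbox and hand it to hardware with CREATE_SRQ.
 */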
static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
		      u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	u64 mtts_wqe[MTT_MIN_COUNT] = { 0 };
	u64 mtts_idx[MTT_MIN_COUNT] = { 0 };
	dma_addr_t dma_handle_wqe = 0;
	dma_addr_t dma_handle_idx = 0;
	int ret;

	/* Get the physical address of srq buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
				ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	/* Get physical address of idx que buf */
	ret = hns_roce_mtr_find(hr_dev, &srq->idx_que.mtr, 0, mtts_idx,
				ARRAY_SIZE(mtts_idx), &dma_handle_idx);
	if (ret < 1) {
		ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
			  ret);
		return -ENOBUFS;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ number, ret = %d.\n", ret);
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
		goto err_out;
	}

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
		goto err_put;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR_OR_NULL(mailbox)) {
		ret = -ENOMEM;
		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret) {
		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
		goto err_xa;
	}

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);

	return ret;
}
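
/*
 * Tear down the SRQ context: ask hardware to destroy the SRQ, unpublish it
 * from the xarray, wait for all outstanding references to drop, then release
 * the SRQC table entry and the SRQ number.
 */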
static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}
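
/*
 * Allocate the SRQ WQE buffer. The WQE size is the SGE size scaled by max_gs
 * and rounded up to a power of two; the buffer itself is managed through a
 * single-region MTR (hns_roce_mtr).
 */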
static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
						      HNS_ROCE_SGE_SIZE *
						      srq->max_gs)));

	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
							 srq->wqe_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &srq->buf_mtr, &buf_attr,
				  hr_dev->caps.srqwqe_ba_pg_sz +
				  HNS_HW_PAGE_SHIFT, udata, addr);
	if (err)
		ibdev_err(ibdev,
			  "failed to alloc SRQ buf mtr, ret = %d.\n", err);

	return err;
}

static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
}
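
/*
 * Allocate the idx queue that records which WQEs in the SRQ buffer are in
 * use. Kernel consumers also get a bitmap to track free idx queue entries;
 * userspace manages that bookkeeping itself.
 */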
static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
			 struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);

	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
						srq->idx_que.entry_shift);
	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
	buf_attr.region_count = 1;

	err = hns_roce_mtr_create(hr_dev, &idx_que->mtr, &buf_attr,
				  hr_dev->caps.idx_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				  udata, addr);
	if (err) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ idx mtr, ret = %d.\n", err);
		return err;
	}

	if (!udata) {
		idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
		if (!idx_que->bitmap) {
			ibdev_err(ibdev, "failed to alloc SRQ idx bitmap.\n");
			err = -ENOMEM;
			goto err_idx_mtr;
		}
	}

	return 0;

err_idx_mtr:
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);

	return err;
}

static void free_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	bitmap_free(idx_que->bitmap);
	idx_que->bitmap = NULL;
	hns_roce_mtr_destroy(hr_dev, &idx_que->mtr);
}
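
/*
 * Allocate the wrid array used by kernel consumers to map completed WQEs back
 * to the caller's work request IDs, and reset the queue head/tail indices.
 */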
static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
{
	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;
	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	return 0;
}

static void free_srq_wrid(struct hns_roce_srq *srq)
{
	/* The wrid array is allocated with kvmalloc_array(), so it must be
	 * released with kvfree() rather than kfree().
	 */
	kvfree(srq->wrid);
	srq->wrid = NULL;
}
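
/*
 * Work out the maximum number of SGEs a caller may request. HIP09 uses the
 * firmware capability directly; HIP08 reserves one SGE in kernel mode and
 * widens the user-mode limit to cover the reservation made by userspace.
 */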
static u32 proc_srq_sge(struct hns_roce_dev *dev, struct hns_roce_srq *hr_srq,
			bool user)
{
	u32 max_sge = dev->caps.max_srq_sges;

	if (dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return max_sge;

	/* Reserve SGEs only for HIP08 in kernel mode; the userspace driver
	 * already accounts for the reserved SGEs when calculating max_sge for
	 * the WQE buffer, so there is no need to do that again in the kernel.
	 * However, the number may exceed the SGE capacity recorded in the
	 * firmware, so the kernel driver should adapt the value accordingly.
	 */
	if (user)
		max_sge = roundup_pow_of_two(max_sge + 1);
	else
		hr_srq->rsv_sge = 1;

	return max_sge;
}
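
/*
 * Create an SRQ for the IB core: validate the requested depth and SGE count,
 * size the WQE and idx queues, allocate the buffers (and the wrid array for
 * kernel consumers), then install the SRQ context in hardware and report the
 * resulting SRQN back to userspace.
 */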
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_srq ucmd = {};
	u32 max_sge;
	int ret;
	u32 cqn;

	if (init_attr->srq_type != IB_SRQT_BASIC &&
	    init_attr->srq_type != IB_SRQT_XRC)
		return -EOPNOTSUPP;

	max_sge = proc_srq_sge(hr_dev, srq, !!udata);

	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > max_sge) {
		ibdev_err(&hr_dev->ib_dev,
			  "SRQ config error, depth = %u, sge = %d\n",
			  init_attr->attr.max_wr, init_attr->attr.max_sge);
		return -EINVAL;
	}

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs =
		roundup_pow_of_two(init_attr->attr.max_sge + srq->rsv_sge);
	init_attr->attr.max_wr = srq->wqe_cnt;
	init_attr->attr.max_sge = srq->max_gs;

	if (udata) {
		ret = ib_copy_from_udata(&ucmd, udata,
					 min(udata->inlen, sizeof(ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy SRQ udata, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_srq_buf(hr_dev, srq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ buffer, ret = %d.\n", ret);
		return ret;
	}

	ret = alloc_srq_idx(hr_dev, srq, udata, ucmd.que_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc SRQ idx, ret = %d.\n", ret);
		goto err_buf_alloc;
	}

	if (!udata) {
		ret = alloc_srq_wrid(hr_dev, srq);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc SRQ wrid, ret = %d.\n",
				  ret);
			goto err_idx_alloc;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;
	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc SRQ context, ret = %d.\n", ret);
		goto err_wrid_alloc;
	}

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;
	srq->max_gs = init_attr->attr.max_sge;
	init_attr->attr.max_sge = srq->max_gs - srq->rsv_sge;

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_srqc_alloc;
	}

	return 0;

err_srqc_alloc:
	free_srqc(hr_dev, srq);
err_wrid_alloc:
	free_srq_wrid(srq);
err_idx_alloc:
	free_srq_idx(hr_dev, srq);
err_buf_alloc:
	free_srq_buf(hr_dev, srq);

	return ret;
}
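
/*
 * Tear down an SRQ in the reverse order of creation: hardware context first,
 * then the idx queue, the wrid array and the WQE buffer.
 */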
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	free_srqc(hr_dev, srq);
	free_srq_idx(hr_dev, srq);
	free_srq_wrid(srq);
	free_srq_buf(hr_dev, srq);

	return 0;
}
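
/*
 * Initialize and clean up the per-device SRQ table: an xarray mapping SRQN to
 * SRQ plus a bitmap from which SRQ numbers are allocated, excluding the
 * reserved ones.
 */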
int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}