/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
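
/*
 * Deferred work that moves a QP to the error state so that stale CQEs can
 * be flushed. It runs on the device's irq workqueue; the reference taken
 * in init_flush_work() is dropped here once the flush has been issued.
 */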
static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}
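
/*
 * Schedule the flush work above. A reference on the QP is taken so that
 * QP destruction waits until the flush work has finished.
 */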
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}
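
/*
 * Asynchronous event handler: look up the QP by number, start a CQE flush
 * for work-queue error events on hardware other than v1, and forward the
 * event to the QP's registered event callback.
 */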
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_event event;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
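
/*
 * Allocate a QP number. GSI QPs use a fixed number (derived from the
 * physical port on v1 hardware), while regular QPs take one from the
 * QPN bitmap.
 */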
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	unsigned long num = 0;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap,
						  1, 1, &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev, "Failed to alloc bitmap\n");
			return -ENOMEM;
		}

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}
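
/*
 * Link the QP onto the device-wide QP list and onto the SQ/RQ lists of its
 * CQs. These lists are used when completions have to be generated in
 * software, so both the list lock and the CQ locks are held while they are
 * updated.
 */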
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR);
}
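
/*
 * Work out the receive queue size from the requested capabilities: round
 * the WQE count up to a power of two, reserve extra SGEs, and write the
 * granted values back into @cap.
 */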
static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If srq exist, set zero for relative number of rq */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
					      HNS_ROCE_RESERVED_SGE);

	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	else
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
					    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE;

	return 0;
}
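
/*
 * Configure the extended SGE space of the SQ (not present on v1 hardware).
 * WQEs that need more SGEs than fit in the WQE itself, and all UD/GSI WQEs,
 * place the extra entries in this separate region.
 */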
static int set_extend_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
				struct hns_roce_qp *hr_qp,
				struct ib_qp_cap *cap)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	cnt = max(1U, cap->max_send_sge);
	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = roundup_pow_of_two(cnt);
		hr_qp->sge.sge_cnt = 0;

		return 0;
	}

	hr_qp->sq.max_gs = cnt;

	/* UD sqwqe's sge use extend sge */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI ||
	    hr_qp->ibqp.qp_type == IB_QPT_UD) {
		cnt = roundup_pow_of_two(sq_wqe_cnt * hr_qp->sq.max_gs);
	} else if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE) {
		cnt = roundup_pow_of_two(sq_wqe_cnt *
				(hr_qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE));

		if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
			if (cnt > hr_dev->caps.max_extend_sg) {
				ibdev_err(ibdev,
					  "failed to check exSGE num, exSGE num = %d.\n",
					  cnt);
				return -EINVAL;
			}
		}
	} else {
		cnt = 0;
	}

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
	hr_qp->sge.sge_cnt = cnt;

	return 0;
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ stride size\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "Failed to check SQ SGE size %d\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}
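
/*
 * Describe the WQE buffer layout for the MTR: up to three regions (SQ WQEs,
 * extended SGEs, RQ WQEs) laid out back to back, each with its own hop
 * number.
 */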
static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}

static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;
	int ret;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
	    cap->max_inline_data > hr_dev->caps.max_sq_inline) {
		ibdev_err(ibdev,
			  "failed to check SQ WR, SGE or inline num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %d.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	ret = set_extend_sge_param(hr_dev, cnt, hr_qp, cap);
	if (ret)
		return ret;

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	/* We don't support inline sends for kernel QPs (yet) */
	cap->max_inline_data = 0;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);
	if (!wqe_list)
		goto err;

	/* Allocate a continuous buffer for all inline sge we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
					sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign buffers of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;
err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}
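
/*
 * Set up the doorbells for the QP. User QPs may get record doorbells
 * mapped from userspace addresses; kernel QPs use the doorbell registers
 * and, optionally, a kernel record doorbell for the RQ.
 */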
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to map user SQ doorbell\n");
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to map user RQ doorbell\n");
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "Failed to alloc kernel RQ doorbell\n");
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;
err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "Failed to alloc SQ wrid\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "Failed to alloc RQ wrid\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;
err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) {
			ibdev_err(ibdev, "Failed to copy QP ucmd\n");
			return -EFAULT;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev, "Failed to set user SQ size\n");
	} else {
		if (init_attr->create_flags &
		    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
			ibdev_err(ibdev, "Failed to check multicast loopback\n");
			return -EINVAL;
		}

		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
			ibdev_err(ibdev, "Failed to check ipoib ud lso\n");
			return -EINVAL;
		}

		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev, "Failed to set kernel SQ size\n");
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd;
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "Failed to set QP param\n");
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Failed to alloc wrid\n");
			return ret;
		}
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP doorbell\n");
		goto err_wrid;
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP buffer\n");
		goto err_db;
	}

	ret = alloc_qpn(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QPN\n");
		goto err_buf;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Failed to alloc QP context\n");
		goto err_qpn;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "Failed to store QP\n");
		goto err_qpc;
	}

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_store;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_store:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_qpc:
	free_qpc(hr_dev, hr_qp);
err_qpn:
	free_qpn(hr_dev, hr_qp);
err_buf:
	free_qp_buf(hr_dev, hr_qp);
err_db:
	free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
	free_kernel_wrid(hr_dev, hr_qp);
	return ret;
}

void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_dev, hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);

	kfree(hr_qp);
}
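
/*
 * Verbs entry point for QP creation. Only RC and GSI QPs are handled here;
 * any other type returns -EOPNOTSUPP. Callers normally reach this through
 * the IB core (ib_create_qp()) rather than invoking it directly.
 */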
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	int ret;

	switch (init_attr->qp_type) {
	case IB_QPT_RC: {
		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
				  hr_qp->qpn, ret);
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		break;
	}
	case IB_QPT_GSI: {
		/* Userspace is not allowed to create special QPs: */
		if (udata) {
			ibdev_err(ibdev, "not support usr space GSI\n");
			return ERR_PTR(-EINVAL);
		}

		hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
		if (!hr_qp)
			return ERR_PTR(-ENOMEM);

		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
						hr_qp);
		if (ret) {
			ibdev_err(ibdev, "Create GSI QP failed!\n");
			kfree(hr_qp);
			return ERR_PTR(ret);
		}

		break;
	}
	default: {
		ibdev_err(ibdev, "not support QP type %d\n",
			  init_attr->qp_type);
		return ERR_PTR(-EOPNOTSUPP);
	}
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			"attr path_mtu(%d)invalid while modify qp",
			attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	int p;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
		ibdev_err(&hr_dev->ib_dev,
			"attr port_num invalid.attr->port_num=%d\n",
			attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
		if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
			ibdev_err(&hr_dev->ib_dev,
				"attr pkey_index invalid.attr->pkey_index=%d\n",
				attr->pkey_index);
			return -EINVAL;
		}
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			"attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
			attr->max_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
		ibdev_err(&hr_dev->ib_dev,
			"attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
			attr->max_dest_rd_atomic);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_PATH_MTU)
		return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

	return 0;
}

int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
			ret = -EPERM;
			ibdev_err(&hr_dev->ib_dev,
				  "RST2RST state is not supported\n");
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}
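
/*
 * Lock the send and receive CQs of a QP together. Distinct CQs are always
 * taken in ascending CQN order so two paths locking the same pair cannot
 * deadlock; NULL CQs and the shared-CQ case are handled explicitly to keep
 * sparse's lock annotations balanced. A caller is expected to pair it with
 * hns_roce_unlock_cqs(), roughly:
 *
 *	hns_roce_lock_cqs(send_cq, recv_cq);
 *	... update state protected by both CQ locks ...
 *	hns_roce_unlock_cqs(send_cq, recv_cq);
 */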
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}
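
/*
 * Check whether posting @nreq more WQEs would overflow the work queue.
 * The fast path uses the cached head/tail; only when that looks full is
 * the CQ lock taken to re-read a stable head/tail pair.
 */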
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int ret;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
				   hr_dev->caps.num_qps - 1, reserved_from_bot,
				   reserved_from_top);
	if (ret) {
		dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
			ret);
		return ret;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}