// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
	else
		attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = IB_DEFAULT_PKEY_FULL;
	return 0;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
		     IB_DEVICE_MODIFY_NODE_DESC))
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(uctx->device);
	struct rxe_ucontext *uc = to_ruc(uctx);

	return rxe_add_to_pool(&rxe->uc_pool, &uc->pelem);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
			 struct rdma_ah_init_attr *init_attr,
			 struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
	if (err)
		return err;

	err = rxe_add_to_pool(&rxe->ah_pool, &ah->pelem);
	if (err)
		return err;

	rxe_init_av(init_attr->ah_attr, &ah->av);
	return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah);
	return 0;
}

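/*
 * Receive WQEs are posted into a ring buffer shared with the responder
 * (and, for user QPs, with userspace).  post_one_recv() fills the WQE at
 * the producer slot and only then advances the producer index; the
 * smp_wmb() below orders the WQE writes before that index update so the
 * consumer never observes a half-written WQE.
 */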
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_pd *pd = to_rpd(ibsrq->pd);
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	err = rxe_add_to_pool(&rxe->srq_pool, &srq->pelem);
	if (err)
		goto err1;

	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
	if (err)
		goto err2;

	return 0;

err2:
	rxe_drop_ref(pd);
	rxe_drop_ref(srq);
err1:
	return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static void rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_ref(srq);
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

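/*
 * QP creation: validate the init attributes, allocate the QP from the
 * rxe object pool and assign it an index, then build the send/receive
 * queues in rxe_qp_from_init().  Inbound udata with a non-zero inlen is
 * rejected here because no create-QP command structure is defined for
 * this driver; only the response (uresp) is used.
 */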
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

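/*
 * Send-path helpers.  validate_send_wr() enforces per-QP limits (SGE
 * count, inline data size, and 8-byte length/alignment for atomics),
 * init_send_wr()/init_send_wqe() translate an ib_send_wr into the
 * driver's rxe_send_wqe layout, and post_one_send() places the WQE on
 * the send queue ring under sq_lock.
 */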
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			fallthrough;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;
	struct ib_send_wr *next;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		next = wr->next;

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);
		if (err) {
			*bad_wr = wr;
			break;
		}

		wr = next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

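/*
 * For a user QP the WQEs have already been written into the shared send
 * queue by the userspace provider, so posting only needs to kick the
 * requester task from this process context; kernel QPs go through
 * rxe_post_send_kernel() to build the WQEs here.
 */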
static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
			 struct ib_udata *udata)
{
	int err;
	struct ib_device *dev = ibcq->device;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return -EINVAL;

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		return err;

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
			       uresp);
	if (err)
		return err;

	return rxe_add_to_pool(&rxe->cq_pool, &cq->pelem);
}

static void rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

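/*
 * Completion handling: rxe_poll_cq() drains up to num_entries CQEs from
 * the consumer side of the completion ring under cq_lock, and
 * rxe_req_notify_cq() arms the CQ, returning 1 when completions are
 * already queued and IB_CQ_REPORT_MISSED_EVENTS was requested.
 */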
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	rxe_add_index(mr);
	rxe_add_ref(pd);
	rxe_mem_init_dma(pd, access, mr);

	return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

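/*
 * rxe_set_page() is the per-page callback passed to ib_sg_to_pages()
 * from rxe_map_mr_sg(): it appends each page address to the MR's
 * two-level map[]/buf[] table (RXE_BUF_PER_MAP entries per map) until
 * num_buf pages have been recorded.
 */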
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

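/*
 * Verbs dispatch table.  The INIT_RDMA_OBJ_SIZE() entries tell the RDMA
 * core how large the driver's container structs are, so ucontext, PD,
 * AH, SRQ and CQ objects are allocated by the core and only initialized
 * by the callbacks defined in this file.
 */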
static const struct ib_device_ops rxe_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_RXE,
	.uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,

	INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
};

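/*
 * rxe_register_device() fills in the ib_device fields (node GUID derived
 * from the netdev MAC via addrconf_addr_eui48(), virtual DMA ops, the
 * uverbs command mask and the ops table), allocates the crc32 shash used
 * for ICRC computation, binds the netdev, and finally registers the
 * device with the RDMA core under the requested ibdev_name.
 */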
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dev->dev.dma_parms = &rxe->dma_parms;
	rxe->dma_parms = (struct device_dma_parameters)
		{ .max_segment_size = SZ_2G };
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);

	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	err = ib_register_device(dev, ibdev_name);
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}