// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
                            struct ib_device_attr *attr,
                            struct ib_udata *uhw)
{
        struct rxe_dev *rxe = to_rdev(dev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        *attr = rxe->attr;
        return 0;
}

static int rxe_query_port(struct ib_device *dev,
                          u32 port_num, struct ib_port_attr *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;
        int rc;

        port = &rxe->port;

        /* *attr being zeroed by the caller, avoid zeroing it here */
        *attr = port->attr;

        mutex_lock(&rxe->usdev_lock);
        rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
                              &attr->active_width);

        if (attr->state == IB_PORT_ACTIVE)
                attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        else if (dev_get_flags(rxe->ndev) & IFF_UP)
                attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
        else
                attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;

        mutex_unlock(&rxe->usdev_lock);

        return rc;
}

static int rxe_query_pkey(struct ib_device *device,
                          u32 port_num, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = IB_DEFAULT_PKEY_FULL;
        return 0;
}

static int rxe_modify_device(struct ib_device *dev,
                             int mask, struct ib_device_modify *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);

        if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
                     IB_DEVICE_MODIFY_NODE_DESC))
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
                rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                memcpy(rxe->ib_dev.node_desc,
                       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
        }

        return 0;
}

static int rxe_modify_port(struct ib_device *dev,
                           u32 port_num, int mask, struct ib_port_modify *attr)
{
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_port *port;

        port = &rxe->port;

        port->attr.port_cap_flags |= attr->set_port_cap_mask;
        port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

        if (mask & IB_PORT_RESET_QKEY_CNTR)
                port->attr.qkey_viol_cntr = 0;

        return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
                                               u32 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(ibuc->device);
        struct rxe_ucontext *uc = to_ruc(ibuc);

        return rxe_add_to_pool(&rxe->uc_pool, uc);
}

static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
        struct rxe_ucontext *uc = to_ruc(ibuc);

        rxe_drop_ref(uc);
}

static int rxe_port_immutable(struct ib_device *dev, u32 port_num,
                              struct ib_port_immutable *immutable)
{
        int err;
        struct ib_port_attr attr;

        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

        err = ib_query_port(dev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);

        return rxe_add_to_pool(&rxe->pd_pool, pd);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct rxe_pd *pd = to_rpd(ibpd);

        rxe_drop_ref(pd);
        return 0;
}

static int rxe_create_ah(struct ib_ah *ibah,
                         struct rdma_ah_init_attr *init_attr,
                         struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibah->device);
        struct rxe_ah *ah = to_rah(ibah);

        err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
        if (err)
                return err;

        err = rxe_add_to_pool(&rxe->ah_pool, ah);
        if (err)
                return err;

        rxe_init_av(init_attr->ah_attr, &ah->av);
        return 0;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibah->device);
        struct rxe_ah *ah = to_rah(ibah);

        err = rxe_av_chk_attr(rxe, attr);
        if (err)
                return err;

        rxe_init_av(attr, &ah->av);
        return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
        struct rxe_ah *ah = to_rah(ibah);

        memset(attr, 0, sizeof(*attr));
        attr->type = ibah->type;
        rxe_av_to_attr(&ah->av, attr);
        return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
        struct rxe_ah *ah = to_rah(ibah);

        rxe_drop_ref(ah);
        return 0;
}
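
/*
 * Copy one ib_recv_wr into the next free slot of a receive queue.  The
 * queue accessors are parameterized on rq->is_user so the right queue
 * type is used, and the producer index is only advanced once the WQE
 * has been fully written.
 */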

static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
        int err;
        int i;
        u32 length;
        struct rxe_recv_wqe *recv_wqe;
        int num_sge = ibwr->num_sge;
        int full;

        if (rq->is_user)
                full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
        else
                full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);

        if (unlikely(full)) {
                err = -ENOMEM;
                goto err1;
        }

        if (unlikely(num_sge > rq->max_sge)) {
                err = -EINVAL;
                goto err1;
        }

        length = 0;
        for (i = 0; i < num_sge; i++)
                length += ibwr->sg_list[i].length;

        if (rq->is_user)
                recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
        else
                recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);

        recv_wqe->wr_id = ibwr->wr_id;
        recv_wqe->num_sge = num_sge;

        memcpy(recv_wqe->dma.sge, ibwr->sg_list,
               num_sge * sizeof(struct ib_sge));

        recv_wqe->dma.length = length;
        recv_wqe->dma.resid = length;
        recv_wqe->dma.num_sge = num_sge;
        recv_wqe->dma.cur_sge = 0;
        recv_wqe->dma.sge_offset = 0;

        if (rq->is_user)
                advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
        else
                advance_producer(rq->queue, QUEUE_TYPE_KERNEL);

        return 0;

err1:
        return err;
}
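
/*
 * Create a shared receive queue.  Only IB_SRQT_BASIC is supported; the
 * SRQ is checked, added to the srq pool, takes a reference on its PD,
 * and for user consumers returns queue details through uresp.
 */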

static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
                          struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibsrq->device);
        struct rxe_pd *pd = to_rpd(ibsrq->pd);
        struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_create_srq_resp __user *uresp = NULL;

        if (init->srq_type != IB_SRQT_BASIC)
                return -EOPNOTSUPP;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
                srq->is_user = true;
        } else {
                srq->is_user = false;
        }

        err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
        if (err)
                goto err1;

        err = rxe_add_to_pool(&rxe->srq_pool, srq);
        if (err)
                goto err1;

        rxe_add_ref(pd);
        srq->pd = pd;

        err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
        if (err)
                goto err2;

        return 0;

err2:
        rxe_drop_ref(pd);
        rxe_drop_ref(srq);
err1:
        return err;
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                          enum ib_srq_attr_mask mask,
                          struct ib_udata *udata)
{
        int err;
        struct rxe_srq *srq = to_rsrq(ibsrq);
        struct rxe_dev *rxe = to_rdev(ibsrq->device);
        struct rxe_modify_srq_cmd ucmd = {};

        if (udata) {
                if (udata->inlen < sizeof(ucmd))
                        return -EINVAL;

                err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
                if (err)
                        return err;
        }

        err = rxe_srq_chk_attr(rxe, srq, attr, mask);
        if (err)
                goto err1;

        err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
        struct rxe_srq *srq = to_rsrq(ibsrq);

        if (srq->error)
                return -EINVAL;

        attr->max_wr = srq->rq.queue->buf->index_mask;
        attr->max_sge = srq->rq.max_sge;
        attr->srq_limit = srq->limit;
        return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct rxe_srq *srq = to_rsrq(ibsrq);

        if (srq->rq.queue)
                rxe_queue_cleanup(srq->rq.queue);

        rxe_drop_ref(srq->pd);
        rxe_drop_ref(srq);
        return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                             const struct ib_recv_wr **bad_wr)
{
        int err = 0;
        unsigned long flags;
        struct rxe_srq *srq = to_rsrq(ibsrq);

        spin_lock_irqsave(&srq->rq.producer_lock, flags);

        while (wr) {
                err = post_one_recv(&srq->rq, wr);
                if (unlikely(err))
                        break;
                wr = wr->next;
        }

        spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

        if (err)
                *bad_wr = wr;

        return err;
}
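
/*
 * Allocate and initialize a QP.  No inbound udata is accepted, only an
 * output response buffer; user QPs are flagged via qp->is_user so later
 * queue accesses pick the matching queue type.  Errors unwind the index
 * and the pool reference.
 */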

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
                                   struct ib_qp_init_attr *init,
                                   struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_qp *qp;
        struct rxe_create_qp_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return ERR_PTR(-EINVAL);
                uresp = udata->outbuf;
        }

        if (init->create_flags)
                return ERR_PTR(-EOPNOTSUPP);

        err = rxe_qp_chk_init(rxe, init);
        if (err)
                goto err1;

        qp = rxe_alloc(&rxe->qp_pool);
        if (!qp) {
                err = -ENOMEM;
                goto err1;
        }

        if (udata) {
                if (udata->inlen) {
                        err = -EINVAL;
                        goto err2;
                }
                qp->is_user = true;
        } else {
                qp->is_user = false;
        }

        rxe_add_index(qp);

        err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
        if (err)
                goto err3;

        return &qp->ibqp;

err3:
        rxe_drop_index(qp);
err2:
        rxe_drop_ref(qp);
err1:
        return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         int mask, struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);

        if (mask & ~IB_QP_ATTR_STANDARD_BITS)
                return -EOPNOTSUPP;

        err = rxe_qp_chk_attr(rxe, qp, attr, mask);
        if (err)
                goto err1;

        err = rxe_qp_from_attr(qp, attr, mask, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                        int mask, struct ib_qp_init_attr *init)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        rxe_qp_to_init(qp, init);
        rxe_qp_to_attr(qp, attr, mask);

        return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        rxe_qp_destroy(qp);
        rxe_drop_index(qp);
        rxe_drop_ref(qp);
        return 0;
}
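
/*
 * Sanity-check a send work request against the send queue limits: SGE
 * count, 8-byte size and alignment for atomics, and the inline data cap.
 */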

static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                            unsigned int mask, unsigned int length)
{
        int num_sge = ibwr->num_sge;
        struct rxe_sq *sq = &qp->sq;

        if (unlikely(num_sge > sq->max_sge))
                goto err1;

        if (unlikely(mask & WR_ATOMIC_MASK)) {
                if (length < 8)
                        goto err1;

                if (atomic_wr(ibwr)->remote_addr & 0x7)
                        goto err1;
        }

        if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
                     (length > sq->max_inline)))
                goto err1;

        return 0;

err1:
        return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
                         const struct ib_send_wr *ibwr)
{
        wr->wr_id = ibwr->wr_id;
        wr->num_sge = ibwr->num_sge;
        wr->opcode = ibwr->opcode;
        wr->send_flags = ibwr->send_flags;

        if (qp_type(qp) == IB_QPT_UD ||
            qp_type(qp) == IB_QPT_SMI ||
            qp_type(qp) == IB_QPT_GSI) {
                wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
                wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
                if (qp_type(qp) == IB_QPT_GSI)
                        wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
                if (wr->opcode == IB_WR_SEND_WITH_IMM)
                        wr->ex.imm_data = ibwr->ex.imm_data;
        } else {
                switch (wr->opcode) {
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        wr->ex.imm_data = ibwr->ex.imm_data;
                        fallthrough;
                case IB_WR_RDMA_READ:
                case IB_WR_RDMA_WRITE:
                        wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
                        wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
                        break;
                case IB_WR_SEND_WITH_IMM:
                        wr->ex.imm_data = ibwr->ex.imm_data;
                        break;
                case IB_WR_SEND_WITH_INV:
                        wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
                        break;
                case IB_WR_ATOMIC_CMP_AND_SWP:
                case IB_WR_ATOMIC_FETCH_AND_ADD:
                        wr->wr.atomic.remote_addr =
                                atomic_wr(ibwr)->remote_addr;
                        wr->wr.atomic.compare_add =
                                atomic_wr(ibwr)->compare_add;
                        wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
                        wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
                        break;
                case IB_WR_LOCAL_INV:
                        wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
                        break;
                case IB_WR_REG_MR:
                        wr->wr.reg.mr = reg_wr(ibwr)->mr;
                        wr->wr.reg.key = reg_wr(ibwr)->key;
                        wr->wr.reg.access = reg_wr(ibwr)->access;
                        break;
                default:
                        break;
                }
        }
}

static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
                                    const struct ib_send_wr *ibwr)
{
        struct ib_sge *sge = ibwr->sg_list;
        u8 *p = wqe->dma.inline_data;
        int i;

        for (i = 0; i < ibwr->num_sge; i++, sge++) {
                memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
                p += sge->length;
        }
}
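
/*
 * Build a send WQE from the ib_send_wr.  Local operations (WR_REG_MASK)
 * are marked posted and skip the DMA state; UD-type QPs capture the AH,
 * and inline data is copied straight into the WQE.
 */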

static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                          unsigned int mask, unsigned int length,
                          struct rxe_send_wqe *wqe)
{
        int num_sge = ibwr->num_sge;

        init_send_wr(qp, &wqe->wr, ibwr);

        /* local operation */
        if (unlikely(mask & WR_REG_MASK)) {
                wqe->mask = mask;
                wqe->state = wqe_state_posted;
                return;
        }

        if (qp_type(qp) == IB_QPT_UD ||
            qp_type(qp) == IB_QPT_SMI ||
            qp_type(qp) == IB_QPT_GSI)
                memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

        if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
                copy_inline_data_to_wqe(wqe, ibwr);
        else
                memcpy(wqe->dma.sge, ibwr->sg_list,
                       num_sge * sizeof(struct ib_sge));

        wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
                mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
        wqe->mask = mask;
        wqe->dma.length = length;
        wqe->dma.resid = length;
        wqe->dma.num_sge = num_sge;
        wqe->dma.cur_sge = 0;
        wqe->dma.sge_offset = 0;
        wqe->state = wqe_state_posted;
        wqe->ssn = atomic_add_return(1, &qp->ssn);
}
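
/*
 * Validate and enqueue a single send WQE under the send-queue lock,
 * returning -ENOMEM when the queue is full.  The producer index is
 * advanced only after the WQE has been fully initialized.
 */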

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
                         unsigned int mask, u32 length)
{
        int err;
        struct rxe_sq *sq = &qp->sq;
        struct rxe_send_wqe *send_wqe;
        unsigned long flags;
        int full;

        err = validate_send_wr(qp, ibwr, mask, length);
        if (err)
                return err;

        spin_lock_irqsave(&qp->sq.sq_lock, flags);

        if (qp->is_user)
                full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
        else
                full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);

        if (unlikely(full)) {
                spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
                return -ENOMEM;
        }

        if (qp->is_user)
                send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
        else
                send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);

        init_send_wqe(qp, ibwr, mask, length, send_wqe);

        if (qp->is_user)
                advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
        else
                advance_producer(sq->queue, QUEUE_TYPE_KERNEL);

        spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

        return 0;
}
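
/*
 * Post a chain of send WRs on a kernel QP and kick the requester task.
 * The loop stops at the first bad WR, which is reported through
 * *bad_wr; the completer task is also run if the QP is in error.
 */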

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
                                const struct ib_send_wr **bad_wr)
{
        int err = 0;
        unsigned int mask;
        unsigned int length = 0;
        int i;
        struct ib_send_wr *next;

        while (wr) {
                mask = wr_opcode_mask(wr->opcode, qp);
                if (unlikely(!mask)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
                             !(mask & WR_INLINE_MASK))) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                next = wr->next;

                length = 0;
                for (i = 0; i < wr->num_sge; i++)
                        length += wr->sg_list[i].length;

                err = post_one_send(qp, wr, mask, length);
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                wr = next;
        }

        rxe_run_task(&qp->req.task, 1);
        if (unlikely(qp->req.state == QP_STATE_ERROR))
                rxe_run_task(&qp->comp.task, 1);

        return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                         const struct ib_send_wr **bad_wr)
{
        struct rxe_qp *qp = to_rqp(ibqp);

        if (unlikely(!qp->valid)) {
                *bad_wr = wr;
                return -EINVAL;
        }

        if (unlikely(qp->req.state < QP_STATE_READY)) {
                *bad_wr = wr;
                return -EINVAL;
        }

        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
                rxe_run_task(&qp->req.task, 0);
                return 0;
        } else
                return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                         const struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_rq *rq = &qp->rq;
        unsigned long flags;

        if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
                *bad_wr = wr;
                err = -EINVAL;
                goto err1;
        }

        if (unlikely(qp->srq)) {
                *bad_wr = wr;
                err = -EINVAL;
                goto err1;
        }

        spin_lock_irqsave(&rq->producer_lock, flags);

        while (wr) {
                err = post_one_recv(rq, wr);
                if (unlikely(err)) {
                        *bad_wr = wr;
                        break;
                }
                wr = wr->next;
        }

        spin_unlock_irqrestore(&rq->producer_lock, flags);

        if (qp->resp.state == QP_STATE_ERROR)
                rxe_run_task(&qp->resp.task, 1);

err1:
        return err;
}
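
/*
 * Create a completion queue.  CQ creation flags are not supported; the
 * queue is sized from attr->cqe, the optional uresp buffer is filled in
 * for user consumers, and the CQ is added to the cq pool.
 */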

static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                         struct ib_udata *udata)
{
        int err;
        struct ib_device *dev = ibcq->device;
        struct rxe_dev *rxe = to_rdev(dev);
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_create_cq_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
        }

        if (attr->flags)
                return -EOPNOTSUPP;

        err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
        if (err)
                return err;

        err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
                               uresp);
        if (err)
                return err;

        return rxe_add_to_pool(&rxe->cq_pool, cq);
}

static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct rxe_cq *cq = to_rcq(ibcq);

        rxe_cq_disable(cq);

        rxe_drop_ref(cq);
        return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        int err;
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_dev *rxe = to_rdev(ibcq->device);
        struct rxe_resize_cq_resp __user *uresp = NULL;

        if (udata) {
                if (udata->outlen < sizeof(*uresp))
                        return -EINVAL;
                uresp = udata->outbuf;
        }

        err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
        if (err)
                goto err1;

        err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
        if (err)
                goto err1;

        return 0;

err1:
        return err;
}
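
/*
 * Reap up to num_entries completions under the CQ lock, copying each
 * CQE's ib_wc to the caller and advancing the consumer index.  Returns
 * the number of completions actually polled.
 */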

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int i;
        struct rxe_cq *cq = to_rcq(ibcq);
        struct rxe_cqe *cqe;
        unsigned long flags;

        spin_lock_irqsave(&cq->cq_lock, flags);
        for (i = 0; i < num_entries; i++) {
                if (cq->is_user)
                        cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
                else
                        cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
                if (!cqe)
                        break;

                memcpy(wc++, &cqe->ibwc, sizeof(*wc));
                if (cq->is_user)
                        advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
                else
                        advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
        }
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
        struct rxe_cq *cq = to_rcq(ibcq);
        int count;

        if (cq->is_user)
                count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
        else
                count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);

        return (count > wc_cnt) ? wc_cnt : count;
}
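
/*
 * Arm the CQ for the requested notification type.  Returns 1 when
 * IB_CQ_REPORT_MISSED_EVENTS is set and completions are already queued,
 * so the caller knows to poll again.
 */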

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct rxe_cq *cq = to_rcq(ibcq);
        unsigned long irq_flags;
        int ret = 0;
        int empty;

        spin_lock_irqsave(&cq->cq_lock, irq_flags);
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = flags & IB_CQ_SOLICITED_MASK;

        if (cq->is_user)
                empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
        else
                empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);

        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
                ret = 1;

        spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

        return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mr *mr;

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        rxe_add_index(mr);
        rxe_add_ref(pd);
        rxe_mr_init_dma(pd, access, mr);

        return &mr->ibmr;
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
                                     u64 start,
                                     u64 length,
                                     u64 iova,
                                     int access, struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mr *mr;

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr) {
                err = -ENOMEM;
                goto err2;
        }

        rxe_add_index(mr);
        rxe_add_ref(pd);

        err = rxe_mr_init_user(pd, start, length, iova, access, mr);
        if (err)
                goto err3;

        return &mr->ibmr;

err3:
        rxe_drop_ref(pd);
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
err2:
        return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct rxe_mr *mr = to_rmr(ibmr);

        mr->state = RXE_MR_STATE_ZOMBIE;
        rxe_drop_ref(mr_pd(mr));
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
        return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                  u32 max_num_sg)
{
        struct rxe_dev *rxe = to_rdev(ibpd->device);
        struct rxe_pd *pd = to_rpd(ibpd);
        struct rxe_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        mr = rxe_alloc(&rxe->mr_pool);
        if (!mr) {
                err = -ENOMEM;
                goto err1;
        }

        rxe_add_index(mr);
        rxe_add_ref(pd);

        err = rxe_mr_init_fast(pd, max_num_sg, mr);
        if (err)
                goto err2;

        return &mr->ibmr;

err2:
        rxe_drop_ref(pd);
        rxe_drop_index(mr);
        rxe_drop_ref(mr);
err1:
        return ERR_PTR(err);
}
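
/*
 * rxe_set_page() is the ib_sg_to_pages() callback used by
 * rxe_map_mr_sg() below: it records each page address in the MR's
 * map/buf tables until num_buf entries have been filled.
 */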

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct rxe_mr *mr = to_rmr(ibmr);
        struct rxe_map *map;
        struct rxe_phys_buf *buf;

        if (unlikely(mr->nbuf == mr->num_buf))
                return -ENOMEM;

        map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
        buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

        buf->addr = addr;
        buf->size = ibmr->page_size;
        mr->nbuf++;

        return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                         int sg_nents, unsigned int *sg_offset)
{
        struct rxe_mr *mr = to_rmr(ibmr);
        int n;

        mr->nbuf = 0;

        n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

        mr->va = ibmr->iova;
        mr->iova = ibmr->iova;
        mr->length = ibmr->length;
        mr->page_shift = ilog2(ibmr->page_size);
        mr->page_mask = ibmr->page_size - 1;
        mr->offset = mr->iova & mr->page_mask;

        return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        int err;
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);
        struct rxe_mc_grp *grp;

        /* takes a ref on grp if successful */
        err = rxe_mcast_get_grp(rxe, mgid, &grp);
        if (err)
                return err;

        err = rxe_mcast_add_grp_elem(rxe, qp, grp);

        rxe_drop_ref(grp);
        return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
        struct rxe_dev *rxe = to_rdev(ibqp->device);
        struct rxe_qp *qp = to_rqp(ibqp);

        return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct rxe_dev *rxe =
                rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

        return sysfs_emit(buf, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
        &dev_attr_parent.attr,
        NULL
};

static const struct attribute_group rxe_attr_group = {
        .attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
        struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

        rxe_set_port_state(rxe);
        dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
        return 0;
}
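
/* Verbs entry points and per-object sizes advertised to the RDMA core */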

static const struct ib_device_ops rxe_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_RXE,
        .uverbs_abi_ver = RXE_UVERBS_ABI_VERSION,

        .alloc_hw_stats = rxe_ib_alloc_hw_stats,
        .alloc_mr = rxe_alloc_mr,
        .alloc_mw = rxe_alloc_mw,
        .alloc_pd = rxe_alloc_pd,
        .alloc_ucontext = rxe_alloc_ucontext,
        .attach_mcast = rxe_attach_mcast,
        .create_ah = rxe_create_ah,
        .create_cq = rxe_create_cq,
        .create_qp = rxe_create_qp,
        .create_srq = rxe_create_srq,
        .create_user_ah = rxe_create_ah,
        .dealloc_driver = rxe_dealloc,
        .dealloc_mw = rxe_dealloc_mw,
        .dealloc_pd = rxe_dealloc_pd,
        .dealloc_ucontext = rxe_dealloc_ucontext,
        .dereg_mr = rxe_dereg_mr,
        .destroy_ah = rxe_destroy_ah,
        .destroy_cq = rxe_destroy_cq,
        .destroy_qp = rxe_destroy_qp,
        .destroy_srq = rxe_destroy_srq,
        .detach_mcast = rxe_detach_mcast,
        .enable_driver = rxe_enable_driver,
        .get_dma_mr = rxe_get_dma_mr,
        .get_hw_stats = rxe_ib_get_hw_stats,
        .get_link_layer = rxe_get_link_layer,
        .get_port_immutable = rxe_port_immutable,
        .map_mr_sg = rxe_map_mr_sg,
        .mmap = rxe_mmap,
        .modify_ah = rxe_modify_ah,
        .modify_device = rxe_modify_device,
        .modify_port = rxe_modify_port,
        .modify_qp = rxe_modify_qp,
        .modify_srq = rxe_modify_srq,
        .peek_cq = rxe_peek_cq,
        .poll_cq = rxe_poll_cq,
        .post_recv = rxe_post_recv,
        .post_send = rxe_post_send,
        .post_srq_recv = rxe_post_srq_recv,
        .query_ah = rxe_query_ah,
        .query_device = rxe_query_device,
        .query_pkey = rxe_query_pkey,
        .query_port = rxe_query_port,
        .query_qp = rxe_query_qp,
        .query_srq = rxe_query_srq,
        .reg_user_mr = rxe_reg_user_mr,
        .req_notify_cq = rxe_req_notify_cq,
        .resize_cq = rxe_resize_cq,

        INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_cq, rxe_cq, ibcq),
        INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_srq, rxe_srq, ibsrq),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, rxe_ucontext, ibuc),
        INIT_RDMA_OBJ_SIZE(ib_mw, rxe_mw, ibmw),
};
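
/*
 * Register the rxe device with the RDMA core.  The node GUID is derived
 * from the underlying netdev's MAC address, a crc32 shash is allocated
 * for ICRC computation, and the sysfs "parent" attribute group is set
 * up before ib_register_device() makes the device visible.
 */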

int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name)
{
        int err;
        struct ib_device *dev = &rxe->ib_dev;
        struct crypto_shash *tfm;

        strscpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

        dev->node_type = RDMA_NODE_IB_CA;
        dev->phys_port_cnt = 1;
        dev->num_comp_vectors = num_possible_cpus();
        dev->local_dma_lkey = 0;
        addrconf_addr_eui48((unsigned char *)&dev->node_guid,
                            rxe->ndev->dev_addr);

        dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND) |
                                BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ);

        ib_set_device_ops(dev, &rxe_dev_ops);
        err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
        if (err)
                return err;

        tfm = crypto_alloc_shash("crc32", 0, 0);
        if (IS_ERR(tfm)) {
                pr_err("failed to allocate crc algorithm err:%ld\n",
                       PTR_ERR(tfm));
                return PTR_ERR(tfm);
        }
        rxe->tfm = tfm;

        rdma_set_device_sysfs_group(dev, &rxe_attr_group);
        err = ib_register_device(dev, ibdev_name, NULL);
        if (err)
                pr_warn("%s failed with error %d\n", __func__, err);

        /*
         * Note that rxe may be invalid at this point if another thread
         * unregistered it.
         */
        return err;
}