/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                        struct ib_udata *uhw)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof *attr);
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                 IB_DEVICE_RC_RNR_NAK_GEN |
                                 IB_DEVICE_SHUTDOWN_PORT |
                                 IB_DEVICE_SYS_IMAGE_GUID |
                                 IB_DEVICE_LOCAL_DMA_LKEY |
                                 IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_send_sge = dev->attr.max_send_sge;
        attr->max_recv_sge = dev->attr.max_recv_sge;
        attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}
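
/* Translate the PHY link speed reported by firmware into the closest
 * IB speed/width pair (e.g. 40 Gbps is advertised as QDR x4). Unknown
 * speeds fall back to SDR x1.
 */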
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;
        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;
        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;
        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        /* props being zeroed by the caller, avoid zeroing it here */
        dev = get_ocrdma_dev(ibdev);
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                IB_PORT_DEVICE_MGMT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP;
        props->ip_gids = true;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}
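
/* Doorbell and DPP pages are exported to userspace through mmap(). The
 * helpers below track the (physical address, length) keys handed out to
 * each user context so that ocrdma_mmap() can reject offsets that were
 * never exported to that context.
 */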
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                               unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}
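
/* When firmware pre-allocates the PD pool (pd_mgr->pd_prealloc_valid),
 * PD ids are handed out locally from two bitmaps: one for DPP (direct
 * packet push) PDs and one for normal PDs. The *_thrsh fields record
 * the high-water mark of each pool.
 */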
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                     bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                        dev->pd_mgr->pd_dpp_count--;
                }
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                        dev->pd_mgr->pd_norm_count--;
                }
        }
        return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                             bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating DPP PD, if not available then normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

/*
 * NOTE:
 *
 * ocrdma_ucontext must be used here because this function is also
 * called from ocrdma_alloc_ucontext where ib_udata does not have a
 * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
 * uobj_{alloc|get_xxx} helpers which are used to store the
 * ib_ucontext in the uverbs_attr_bundle wrapping the ib_udata, so
 * ib_udata does NOT imply a valid ib_ucontext here!
 */
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                            struct ocrdma_ucontext *uctx,
                            struct ib_udata *udata)
{
        int status;

        if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid)
                return ocrdma_get_pd_num(dev, pd);

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                }
                return status;
        }

        return 0;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                               struct ocrdma_pd *pd)
{
        if (dev->pd_mgr->pd_prealloc_valid)
                ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                ocrdma_mbx_dealloc_pd(dev, pd);
}
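
/* Each user context owns one pre-created "context PD": its id selects
 * the doorbell page used for the context's CQs, and it is also lent out
 * as a regular PD via ocrdma_get_ucontext_pd(); pd_in_use guards
 * against handing it out twice.
 */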
static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        struct ib_device *ibdev = &dev->ibdev;
        struct ib_pd *pd;
        int status;

        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
        if (!pd)
                return -ENOMEM;

        pd->device = ibdev;
        uctx->cntxt_pd = get_ocrdma_pd(pd);

        status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
        if (status) {
                kfree(uctx->cntxt_pd);
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        kfree(uctx->cntxt_pd);
        uctx->cntxt_pd = NULL;
        _ocrdma_dealloc_pd(dev, pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}
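
/* A ucontext bundles a DMA-coherent address-handle table (2048 u32
 * slots, exported to userspace through the mmap list) together with the
 * context PD described above.
 */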
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        int status;
        struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
        struct ocrdma_alloc_ucontext_resp resp = {};
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return -EFAULT;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);
        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va)
                return -ENOMEM;

        ctx->ah_tbl.len = map_len;

        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return 0;

cpy_err:
        ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
}
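
/* ocrdma_mmap() classifies the requested physical offset into one of
 * three ranges: doorbell pages are mapped non-cached, the DPP window is
 * mapped write-combined, and anything else (queue memory) is mapped
 * cacheable. Doorbell and DPP mappings must be write-only for
 * userspace.
 */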
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
             dev->nic_info.db_total_size)) &&
            (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                   (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                   (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                                      dev->nic_info.dpp_unmapped_len)) &&
                   (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len,
                                         vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                         PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        if (pd->dpp_enabled)
                ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
        return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        int status;
        u8 is_uctx_pd = false;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);

        if (udata) {
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = get_ocrdma_pd(ibpd);
        status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
        if (status)
                goto exit;

pd_mapping:
        if (udata) {
                status = ocrdma_copy_pd_uresp(dev, pd, udata);
                if (status)
                        goto err;
        }
        return 0;

err:
        if (is_uctx_pd)
                ocrdma_release_ucontext_pd(uctx);
        else
                _ocrdma_dealloc_pd(dev, pd);
exit:
        return status;
}

void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return;
                }
        }
        _ocrdma_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                             u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}
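
/* Pick a PBL (page buffer list) size for num_pbes page entries: start
 * at the minimum HW page size and keep doubling until the whole table
 * fits in fewer than dev->attr.max_num_mr_pbl PBLs, or fail with
 * -EFAULT once MAX_OCRDMA_PBL_SIZE is exceeded.
 */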
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                               u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);
        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}
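
/* Walk the umem scatterlist one DMA page at a time and write each page
 * address into the PBEs, advancing to the next PBL once the current
 * one is full.
 */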
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct sg_dma_page_iter sg_iter;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int pbe_cnt, total_num_pbes = 0;
        u64 pg_addr;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                /* store the page address in pbe */
                pg_addr = sg_page_iter_dma_address(&sg_iter);
                pbe->pa_lo = cpu_to_le32(pg_addr);
                pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
                pbe_cnt += 1;
                total_num_pbes += 1;
                pbe++;

                /* if done building pbes, issue the mbx cmd. */
                if (total_num_pbes == num_pbes)
                        return;

                /* if the given pbl is full storing the pbes,
                 * move to next pbl.
                 */
                if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
                        pbl_tbl++;
                        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                        pbe_cnt = 0;
                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(ibpd->device, start, len, acc);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = PAGE_SIZE;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        kfree(mr->pages);
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}
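
/* Build the create-CQ response for userspace: CQ id, queue page
 * geometry and the doorbell page of the context PD, registering both
 * the doorbell and CQ pages in the context's mmap list.
 */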
static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);
        struct ocrdma_create_cq_uresp uresp;

        /* this must be user flow! */
        if (!udata)
                return -EINVAL;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                     struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        int entries = attr->cqe;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (attr->flags)
                return -EINVAL;

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return -EFAULT;
        } else
                ureq.dpp_cq = 0;

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);

        if (udata)
                pd_id = uctx->cntxt_pd->id;

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status)
                return status;

        if (udata) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return 0;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        return status;
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}
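
/* Before a CQ is destroyed, count the CQEs still marked valid in the
 * queue and acknowledge them with a single CQ doorbell ring, done under
 * cq_lock to sync up with any polling thread the last irq scheduled.
 */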
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* Last irq might have scheduled a polling thread
         * sync-up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        u32 irq, indx;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}
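
/* Validate a create-QP request against device limits before any
 * mailbox command is issued. Only GSI, RC, UC and UD QP types are
 * supported, and the GSI QP is special-cased: it skips the send-WR
 * depth check, only one instance may exist, and its CQs cannot be
 * shared with consumer QPs.
 */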
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs,
                                  struct ib_udata *udata)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EOPNOTSUPP;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (udata && attrs->qp_type == IB_QPT_GSI) {
                pr_err
                    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                     __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                               __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}
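
/* Doorbell layout depends on the ASIC generation: SKH-R parts use the
 * GEN2 SQ/RQ offsets within the PD's doorbell page, while older parts
 * use the original offsets.
 */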
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
		kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
			GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
		kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}
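
/* Capture the creation-time attributes (type, SGE limits, inline limit,
 * signaling mode) in the driver QP before talking to the firmware.
 */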
static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}
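
/* Verb entry point: validate the request, create the QP in firmware, set up
 * the wr_id tables (kernel QPs only), register the QP number and doorbells,
 * and copy the mmap/doorbell details back to user space when requested.
 */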
struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
			       struct ib_qp_init_attr *attrs,
			       struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_create_qp_ureq ureq;
	u16 dpp_credit_lmt, dpp_offset;

	status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
	if (status)
		goto gen_err;
	memset(&ureq, 0, sizeof(ureq));
	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	}
	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		status = -ENOMEM;
		goto gen_err;
	}
	ocrdma_set_qp_init_params(qp, pd, attrs);
	if (udata == NULL)
		qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
				  OCRDMA_QP_FAST_REG);

	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
				      ureq.dpp_cq_id,
				      &dpp_offset, &dpp_credit_lmt);
	if (status)
		goto mbx_err;

	/* user-space QPs' wr_id tables are managed by the user library */
	if (udata == NULL) {
		status = ocrdma_alloc_wr_id_tbl(qp);
		if (status)
			goto map_err;
	}

	status = ocrdma_add_qpn_map(dev, qp);
	if (status)
		goto map_err;
	ocrdma_set_qp_db(dev, qp, pd);
	if (udata) {
		status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
					      dpp_credit_lmt,
					      (attrs->srq != NULL));
		if (status)
			goto cpy_err;
	}
	ocrdma_store_gsi_qp_cq(dev, attrs);
	qp->ibqp.qp_num = qp->id;
	mutex_unlock(&dev->dev_lock);
	return &qp->ibqp;

cpy_err:
	ocrdma_del_qpn_map(dev, qp);
map_err:
	ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
	mutex_unlock(&dev->dev_lock);
	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
	return ERR_PTR(status);
}
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask)
{
	int status = 0;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);
	if (attr_mask & IB_QP_STATE)
		status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
	/* if the new and previous states are the same, the hw doesn't need
	 * to know about it.
	 */
	if (status < 0)
		return status;
	return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}
int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_udata *udata)
{
	unsigned long flags;
	int status = -EINVAL;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	enum ib_qp_state old_qps, new_qps;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	/* synchronize with multiple contexts trying to change or retrieve qps */
	mutex_lock(&dev->dev_lock);
	/* synchronize with wqe, rqe posting and cqe processing contexts */
	spin_lock_irqsave(&qp->q_lock, flags);
	old_qps = get_ibqp_state(qp->state);
	if (attr_mask & IB_QP_STATE)
		new_qps = attr->qp_state;
	else
		new_qps = old_qps;
	spin_unlock_irqrestore(&qp->q_lock, flags);

	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
		       old_qps, new_qps);
		goto param_err;
	}

	status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
	if (status > 0)
		status = 0;
param_err:
	mutex_unlock(&dev->dev_lock);
	return status;
}
static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
	switch (mtu) {
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	default:
		return IB_MTU_1024;
	}
}
static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
	int ib_qp_acc_flags = 0;

	if (qp_cap_flags & OCRDMA_QP_INB_WR)
		ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
	if (qp_cap_flags & OCRDMA_QP_INB_RD)
		ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
	return ib_qp_acc_flags;
}
int ocrdma_query_qp(struct ib_qp *ibqp,
		    struct ib_qp_attr *qp_attr,
		    int attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	int status;
	u32 qp_state;
	struct ocrdma_qp_params params;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);

	memset(&params, 0, sizeof(params));
	mutex_lock(&dev->dev_lock);
	status = ocrdma_mbx_query_qp(dev, qp, &params);
	mutex_unlock(&dev->dev_lock);
	if (status)
		goto mbx_err;
	if (qp->qp_type == IB_QPT_UD)
		qp_attr->qkey = params.qkey;
	qp_attr->path_mtu =
		ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
				       OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
				       OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
	qp_attr->path_mig_state = IB_MIG_MIGRATED;
	qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
	qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
	qp_attr->dest_qp_num =
	    params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;

	qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
	qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
	qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
	qp_attr->cap.max_send_sge = qp->sq.max_sges;
	qp_attr->cap.max_recv_sge = qp->rq.max_sges;
	qp_attr->cap.max_inline_data = qp->max_inline_data;
	qp_init_attr->cap = qp_attr->cap;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;

	rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
			params.rnt_rc_sl_fl &
			  OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
			qp->sgid_idx,
			(params.hop_lmt_rq_psn &
			 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
			 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
			(params.tclass_sq_psn &
			 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
			 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
	rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
	rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
					   OCRDMA_QP_PARAMS_SL_MASK) >>
					   OCRDMA_QP_PARAMS_SL_SHIFT);
	qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
			    OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
				OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
	qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
			      OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
				OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
	qp_attr->retry_cnt =
	    (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
		OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
	qp_attr->min_rnr_timer = 0;
	qp_attr->pkey_index = 0;
	qp_attr->port_num = 1;
	rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
	rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
	qp_attr->alt_pkey_index = 0;
	qp_attr->alt_port_num = 0;
	qp_attr->alt_timeout = 0;
	memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
	qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
		    OCRDMA_QP_PARAMS_STATE_SHIFT;
	qp_attr->qp_state = get_ibqp_state(qp_state);
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
	qp_attr->max_dest_rd_atomic =
	    params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
	qp_attr->max_rd_atomic =
	    params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
	qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
	/* Sync driver QP state with FW */
	ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
mbx_err:
	return status;
}
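
/* Each bit in idx_bit_fields tracks whether the matching slot of the SRQ
 * shadow wr_id table is free (1) or in use (0); allocation and release
 * both simply flip the bit.
 */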
static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
{
	unsigned int i = idx / 32;
	u32 mask = (1U << (idx % 32));

	srq->idx_bit_fields[i] ^= mask;
}
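
/* Number of free slots in a circular hardware queue: the distance from
 * head back around to tail, computed modulo the queue depth.
 */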
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
	return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}

static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
	return (qp->sq.tail == qp->sq.head);
}

static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
	return (qp->rq.tail == qp->rq.head);
}

static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
{
	return q->va + (q->head * q->entry_size);
}

static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
				      u32 idx)
{
	return q->va + (idx * q->entry_size);
}

static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
{
	q->head = (q->head + 1) & q->max_wqe_idx;
}

static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
{
	q->tail = (q->tail + 1) & q->max_wqe_idx;
}
/* discard the cqes for a given QP */
static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
{
	unsigned long cq_flags;
	unsigned long flags;
	int discard_cnt = 0;
	u32 cur_getp, stop_getp;
	struct ocrdma_cqe *cqe;
	u32 qpn = 0, wqe_idx = 0;

	spin_lock_irqsave(&cq->cq_lock, cq_flags);

	/* traverse through the CQEs in the hw CQ,
	 * find the matching CQE for a given qp,
	 * mark the matching one discarded by clearing qpn.
	 * ring the doorbell in the poll_cq() as
	 * we don't complete out of order cqes.
	 */
	cur_getp = cq->getp;
	/* find up to where we reap the cq */
	stop_getp = cur_getp;
	do {
		/* exit if (a) we are done reaping the whole hw cq, or
		 * (b) the qp's sq/rq becomes empty.
		 */
		if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
			break;

		cqe = cq->va + cur_getp;
		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
		/* if a previously discarded cqe is found, skip it too. */
		/* check for matching qp */
		if (qpn == 0 || qpn != qp->id)
			goto skip_cqe;

		if (is_cqe_for_sq(cqe)) {
			ocrdma_hwq_inc_tail(&qp->sq);
		} else {
			if (qp->srq) {
				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
					OCRDMA_CQE_BUFTAG_SHIFT) &
					qp->srq->rq.max_wqe_idx;
				BUG_ON(wqe_idx < 1);
				spin_lock_irqsave(&qp->srq->q_lock, flags);
				ocrdma_hwq_inc_tail(&qp->srq->rq);
				ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
				spin_unlock_irqrestore(&qp->srq->q_lock, flags);
			} else {
				ocrdma_hwq_inc_tail(&qp->rq);
			}
		}
		/* mark cqe discarded so that it is not picked up later
		 * in the poll_cq().
		 */
		discard_cnt += 1;
		cqe->cmn.qpn = 0;
skip_cqe:
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
	} while (cur_getp != stop_getp);
	spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}
void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
	int found = false;
	unsigned long flags;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	/* sync with any active CQ poll */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
	if (found)
		list_del(&qp->sq_entry);
	if (!qp->srq) {
		found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		if (found)
			list_del(&qp->rq_entry);
	}
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
}
int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct ocrdma_pd *pd;
	struct ocrdma_qp *qp;
	struct ocrdma_dev *dev;
	struct ib_qp_attr attrs;
	int attr_mask;
	unsigned long flags;

	qp = get_ocrdma_qp(ibqp);
	dev = get_ocrdma_dev(ibqp->device);

	pd = qp->pd;

	/* change the QP state to ERROR */
	if (qp->state != OCRDMA_QPS_RST) {
		attrs.qp_state = IB_QPS_ERR;
		attr_mask = IB_QP_STATE;
		_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
	}
	/* ensure that CQEs for a newly created QP (whose id may be the same
	 * as one just being destroyed) don't get discarded until the old
	 * CQEs are discarded.
	 */
	mutex_lock(&dev->dev_lock);
	(void) ocrdma_mbx_destroy_qp(dev, qp);

	/*
	 * acquire CQ lock while destroy is in progress, in order to
	 * protect against processing in-flight CQEs for this QP.
	 */
	spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
	if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
		spin_lock(&qp->rq_cq->cq_lock);
		ocrdma_del_qpn_map(dev, qp);
		spin_unlock(&qp->rq_cq->cq_lock);
	} else {
		ocrdma_del_qpn_map(dev, qp);
	}
	spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);

	if (!pd->uctx) {
		ocrdma_discard_cqes(qp, qp->sq_cq);
		ocrdma_discard_cqes(qp, qp->rq_cq);
	}
	mutex_unlock(&dev->dev_lock);

	if (pd->uctx) {
		ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
				PAGE_ALIGN(qp->sq.len));
		if (!qp->srq)
			ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
					PAGE_ALIGN(qp->rq.len));
	}

	ocrdma_del_flush_qp(qp);

	kfree(qp->wqe_wr_id_tbl);
	kfree(qp->rqe_wr_id_tbl);
	kfree(qp);
	return 0;
}
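
/* Tell user space where the SRQ's receive queue and doorbell pages live so
 * the user library can mmap() them and ring the doorbell directly.
 */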
static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
				 struct ib_udata *udata)
{
	int status;
	struct ocrdma_create_srq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.rq_dbid = srq->rq.dbid;
	uresp.num_rq_pages = 1;
	uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
	uresp.rq_page_size = srq->rq.len;
	uresp.db_page_addr = dev->nic_info.unmapped_db +
	    (srq->pd->id * dev->nic_info.db_page_size);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.num_rqe_allocated = srq->rq.max_cnt;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
		uresp.db_shift = 24;
	} else {
		uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
		uresp.db_shift = 16;
	}

	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status)
		return status;
	status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
				 uresp.rq_page_size);
	return status;
}
int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
	struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);

	if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
		return -EINVAL;
	if (init_attr->attr.max_wr > dev->attr.max_rqe)
		return -EINVAL;

	spin_lock_init(&srq->q_lock);
	srq->pd = pd;
	srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
	status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
	if (status)
		return status;

	if (!udata) {
		srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
					     GFP_KERNEL);
		if (!srq->rqe_wr_id_tbl) {
			status = -ENOMEM;
			goto arm_err;
		}

		srq->bit_fields_len = (srq->rq.max_cnt / 32) +
		    (srq->rq.max_cnt % 32 ? 1 : 0);
		srq->idx_bit_fields =
		    kmalloc_array(srq->bit_fields_len, sizeof(u32),
				  GFP_KERNEL);
		if (!srq->idx_bit_fields) {
			status = -ENOMEM;
			goto arm_err;
		}
		memset(srq->idx_bit_fields, 0xff,
		       srq->bit_fields_len * sizeof(u32));
	}
	if (init_attr->attr.srq_limit) {
		status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
		if (status)
			goto arm_err;
	}

	if (udata) {
		status = ocrdma_copy_srq_uresp(dev, srq, udata);
		if (status)
			goto arm_err;
	}

	return 0;

arm_err:
	ocrdma_mbx_destroy_srq(dev, srq);
	kfree(srq->rqe_wr_id_tbl);
	kfree(srq->idx_bit_fields);
	return status;
}
int ocrdma_modify_srq(struct ib_srq *ibsrq,
		      struct ib_srq_attr *srq_attr,
		      enum ib_srq_attr_mask srq_attr_mask,
		      struct ib_udata *udata)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	if (srq_attr_mask & IB_SRQ_MAX_WR)
		status = -EINVAL;
	else
		status = ocrdma_mbx_modify_srq(srq, srq_attr);
	return status;
}
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	int status;
	struct ocrdma_srq *srq;

	srq = get_ocrdma_srq(ibsrq);
	status = ocrdma_mbx_query_srq(srq, srq_attr);
	return status;
}
void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct ocrdma_srq *srq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);

	srq = get_ocrdma_srq(ibsrq);

	ocrdma_mbx_destroy_srq(dev, srq);

	if (srq->pd->uctx)
		ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
				PAGE_ALIGN(srq->rq.len));

	kfree(srq->idx_bit_fields);
	kfree(srq->rqe_wr_id_tbl);
}
/* unprivileged verbs and their support functions. */
static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
				struct ocrdma_hdr_wqe *hdr,
				const struct ib_send_wr *wr)
{
	struct ocrdma_ewqe_ud_hdr *ud_hdr =
		(struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
	struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);

	ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
	if (qp->qp_type == IB_QPT_GSI)
		ud_hdr->qkey = qp->qkey;
	else
		ud_hdr->qkey = ud_wr(wr)->remote_qkey;
	ud_hdr->rsvd_ahid = ah->id;
	ud_hdr->hdr_type = ah->hdr_type;
	if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
		hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
}
static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
			      struct ocrdma_sge *sge, int num_sge,
			      struct ib_sge *sg_list)
{
	int i;

	for (i = 0; i < num_sge; i++) {
		sge[i].lrkey = sg_list[i].lkey;
		sge[i].addr_lo = sg_list[i].addr;
		sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
		sge[i].len = sg_list[i].length;
		hdr->total_len += sg_list[i].length;
	}
	if (num_sge == 0)
		memset(sge, 0, sizeof(*sge));
}
static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
{
	uint32_t total_len = 0, i;

	for (i = 0; i < num_sge; i++)
		total_len += sg_list[i].length;
	return total_len;
}
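
/* When IB_SEND_INLINE is set on a non-UD QP, the payload bytes are copied
 * directly into the WQE body in place of an SGE list, so the adapter does
 * not need a separate lkey-based fetch of the user buffer; otherwise a
 * normal SGE list is built.
 */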
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
				    struct ocrdma_hdr_wqe *hdr,
				    struct ocrdma_sge *sge,
				    const struct ib_send_wr *wr, u32 wqe_size)
{
	int i;
	char *dpp_addr;

	if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
		hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
		if (unlikely(hdr->total_len > qp->max_inline_data)) {
			pr_err("%s() supported_len=0x%x,\n"
			       " unsupported len req=0x%x\n", __func__,
			       qp->max_inline_data, hdr->total_len);
			return -EINVAL;
		}
		dpp_addr = (char *)sge;
		for (i = 0; i < wr->num_sge; i++) {
			memcpy(dpp_addr,
			       (void *)(unsigned long)wr->sg_list[i].addr,
			       wr->sg_list[i].length);
			dpp_addr += wr->sg_list[i].length;
		}

		wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
		if (0 == hdr->total_len)
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
	} else {
		ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
		if (wr->num_sge)
			wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
		else
			wqe_size += sizeof(struct ocrdma_sge);
		hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	}
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	return 0;
}
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			     const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *sge;
	u32 wqe_size = sizeof(*hdr);

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		ocrdma_build_ud_hdr(qp, hdr, wr);
		sge = (struct ocrdma_sge *)(hdr + 2);
		wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
	} else {
		sge = (struct ocrdma_sge *)(hdr + 1);
	}

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	return status;
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	int status;
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);

	status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
	if (status)
		return status;
	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
	return 0;
}
static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
			      const struct ib_send_wr *wr)
{
	struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
	struct ocrdma_sge *sge = ext_rw + 1;
	u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
		       sizeof(struct ocrdma_hdr_wqe);

	ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
	hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);

	ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
	ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
	ext_rw->lrkey = rdma_wr(wr)->rkey;
	ext_rw->len = hdr->total_len;
}
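
/* The device encodes a page size as its shift relative to 4KB, so
 * 4096 << i maps to encoding i; the table tops out at 256MB (i == 16).
 */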
static int get_encoded_page_size(int pg_sz)
{
	/* Max size is 256MB: 4096 << 16 */
	int i = 0;

	for (; i < 17; i++)
		if (pg_sz == (4096 << i))
			break;
	return i;
}
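/* Build a fast-register (FR_MR) WQE: program the MR's iova, first-byte
 * offset, access rights and physical page list (PBEs) so the HW can
 * register the memory inline with the send queue.
 */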
static int ocrdma_build_reg(struct ocrdma_qp *qp,
			    struct ocrdma_hdr_wqe *hdr,
			    const struct ib_reg_wr *wr)
{
	u64 fbo;
	struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
	struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ocrdma_pbe *pbe;
	u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
	int num_pbes = 0, i;

	wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);

	hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
	hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);

	if (wr->access & IB_ACCESS_LOCAL_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
	if (wr->access & IB_ACCESS_REMOTE_WRITE)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
	if (wr->access & IB_ACCESS_REMOTE_READ)
		hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
	hdr->lkey = wr->key;
	hdr->total_len = mr->ibmr.length;

	fbo = mr->ibmr.iova - mr->pages[0];

	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
	fast_reg->fbo_hi = upper_32_bits(fbo);
	fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
	fast_reg->num_sges = mr->npages;
	fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);

	pbe = pbl_tbl->va;
	for (i = 0; i < mr->npages; i++) {
		u64 buf_addr = mr->pages[i];

		pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
		pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
		num_pbes += 1;
		pbe++;

		/* if the pbl is full of stored pbes, move to the next pbl */
		if (num_pbes == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
		}
	}
	return 0;
}
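/* Doorbell write for the SQ: qp->sq.dbid selects the queue in the low bits
 * and the constant 1 above OCRDMA_DB_SQ_SHIFT reports one newly posted WQE.
 */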
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);

	iowrite32(val, qp->sq_db);
}
int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		     const struct ib_send_wr **bad_wr)
{
	int status = 0;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *hdr;
	unsigned long flags;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	while (wr) {
		if (qp->qp_type == IB_QPT_UD &&
		    (wr->opcode != IB_WR_SEND &&
		     wr->opcode != IB_WR_SEND_WITH_IMM)) {
			*bad_wr = wr;
			status = -EINVAL;
			break;
		}
		if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
		    wr->num_sge > qp->sq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		hdr = ocrdma_hwq_head(&qp->sq);
		hdr->cw = 0;
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_FENCE)
			hdr->cw |=
			    (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
		if (wr->send_flags & IB_SEND_SOLICITED)
			hdr->cw |=
			    (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
		hdr->total_len = 0;
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_SEND:
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_SEND_WITH_INV:
			hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->lkey = wr->ex.invalidate_rkey;
			status = ocrdma_build_send(qp, hdr, wr);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
			hdr->immdt = ntohl(wr->ex.imm_data);
			/* fall through */
		case IB_WR_RDMA_WRITE:
			hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
			status = ocrdma_build_write(qp, hdr, wr);
			break;
		case IB_WR_RDMA_READ:
			ocrdma_build_read(qp, hdr, wr);
			break;
		case IB_WR_LOCAL_INV:
			hdr->cw |=
			    (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
			hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
					sizeof(struct ocrdma_sge)) /
				OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
			hdr->lkey = wr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
			break;
		default:
			status = -EINVAL;
			break;
		}
		if (status) {
			*bad_wr = wr;
			break;
		}
		if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
		else
			qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
		qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
		ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
				   OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
		/* make sure wqe is written before adapter can access it */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_sq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->sq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
	u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);

	iowrite32(val, qp->rq_db);
}
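
/* Build a receive WQE; shared by the RQ and SRQ paths. The tag is only
 * meaningful for SRQs, where it indexes the shadow wr_id table so the CQE
 * can be matched back to its RQE (plain QPs pass 0).
 */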
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
			     const struct ib_recv_wr *wr, u16 tag)
{
	u32 wqe_size = 0;
	struct ocrdma_sge *sge;

	if (wr->num_sge)
		wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
	else
		wqe_size = sizeof(*sge) + sizeof(*rqe);

	rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
				OCRDMA_WQE_SIZE_SHIFT);
	rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
	rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
	rqe->total_len = 0;
	rqe->rsvd_tag = tag;
	sge = (struct ocrdma_sge *)(rqe + 1);
	ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
	ocrdma_cpu_to_le32(rqe, wqe_size);
}
int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
	struct ocrdma_hdr_wqe *rqe;

	spin_lock_irqsave(&qp->q_lock, flags);
	if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
		spin_unlock_irqrestore(&qp->q_lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}
	while (wr) {
		if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
		    wr->num_sge > qp->rq.max_sges) {
			*bad_wr = wr;
			status = -ENOMEM;
			break;
		}
		rqe = ocrdma_hwq_head(&qp->rq);
		ocrdma_build_rqe(rqe, wr, 0);

		qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
		/* make sure rqe is written before adapter can access it */
		wmb();

		/* inform hw to start processing it */
		ocrdma_ring_rq_db(qp);

		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&qp->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->q_lock, flags);
	return status;
}
/* cqe for srq's rqe can potentially arrive out of order.
 * index gives the entry in the shadow table where to store
 * the wr_id. tag/index is returned in cqe to reference back
 * for a given rqe.
 */
static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
{
	int row = 0;
	int indx = 0;

	for (row = 0; row < srq->bit_fields_len; row++) {
		if (srq->idx_bit_fields[row]) {
			indx = ffs(srq->idx_bit_fields[row]);
			indx = (row * 32) + (indx - 1);
			BUG_ON(indx >= srq->rq.max_cnt);
			ocrdma_srq_toggle_bit(srq, indx);
			break;
		}
	}
	BUG_ON(row == srq->bit_fields_len);
	return indx + 1; /* Use from index 1 */
}
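
/*
 * Example walk-through (added commentary): if idx_bit_fields[0] == 0x9
 * (bits 0 and 3 free), ffs() returns 1, so indx = (0 * 32) + (1 - 1) = 0;
 * bit 0 is toggled to mark the slot busy and the function returns 1,
 * evidently so that a tag of 0 in a cqe never names a valid SRQ entry.
 * The matching toggle in ocrdma_update_free_srq_cqe() releases the slot
 * once the completion for that rqe is consumed.
 */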
static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
{
	u32 val = srq->rq.dbid | (1 << 16);

	iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
}
int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int status = 0;
	unsigned long flags;
	struct ocrdma_srq *srq;
	struct ocrdma_hdr_wqe *rqe;
	u16 tag;

	srq = get_ocrdma_srq(ibsrq);

	spin_lock_irqsave(&srq->q_lock, flags);
	while (wr) {
		if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
		    wr->num_sge > srq->rq.max_sges) {
			status = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		tag = ocrdma_srq_get_idx(srq);
		rqe = ocrdma_hwq_head(&srq->rq);
		ocrdma_build_rqe(rqe, wr, tag);

		srq->rqe_wr_id_tbl[tag] = wr->wr_id;
		/* make sure rqe is written before adapter can perform DMA */
		wmb();
		/* inform hw to start processing it */
		ocrdma_ring_srq_db(srq);
		/* update pointer, counter for next wr */
		ocrdma_hwq_inc_head(&srq->rq);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&srq->q_lock, flags);
	return status;
}
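
/*
 * Usage sketch (illustrative only): receive buffers are replenished on a
 * shared receive queue through the standard verbs call; 'srq', 'cookie',
 * 'dma_addr', 'len' and 'mr' are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr, .length = len, .lkey = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id = cookie, .sg_list = &sge, .num_sge = 1,
 *	};
 *	const struct ib_recv_wr *bad_wr;
 *	int ret = ib_post_srq_recv(srq, &wr, &bad_wr);
 */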
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
	enum ib_wc_status ibwc_status;

	switch (status) {
	case OCRDMA_CQE_GENERAL_ERR:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	case OCRDMA_CQE_LOC_LEN_ERR:
		ibwc_status = IB_WC_LOC_LEN_ERR;
		break;
	case OCRDMA_CQE_LOC_QP_OP_ERR:
		ibwc_status = IB_WC_LOC_QP_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_EEC_OP_ERR:
		ibwc_status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case OCRDMA_CQE_LOC_PROT_ERR:
		ibwc_status = IB_WC_LOC_PROT_ERR;
		break;
	case OCRDMA_CQE_WR_FLUSH_ERR:
		ibwc_status = IB_WC_WR_FLUSH_ERR;
		break;
	case OCRDMA_CQE_MW_BIND_ERR:
		ibwc_status = IB_WC_MW_BIND_ERR;
		break;
	case OCRDMA_CQE_BAD_RESP_ERR:
		ibwc_status = IB_WC_BAD_RESP_ERR;
		break;
	case OCRDMA_CQE_LOC_ACCESS_ERR:
		ibwc_status = IB_WC_LOC_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_INV_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ACCESS_ERR:
		ibwc_status = IB_WC_REM_ACCESS_ERR;
		break;
	case OCRDMA_CQE_REM_OP_ERR:
		ibwc_status = IB_WC_REM_OP_ERR;
		break;
	case OCRDMA_CQE_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
		ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
		ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
		ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case OCRDMA_CQE_REM_ABORT_ERR:
		ibwc_status = IB_WC_REM_ABORT_ERR;
		break;
	case OCRDMA_CQE_INV_EECN_ERR:
		ibwc_status = IB_WC_INV_EECN_ERR;
		break;
	case OCRDMA_CQE_INV_EEC_STATE_ERR:
		ibwc_status = IB_WC_INV_EEC_STATE_ERR;
		break;
	case OCRDMA_CQE_FATAL_ERR:
		ibwc_status = IB_WC_FATAL_ERR;
		break;
	case OCRDMA_CQE_RESP_TIMEOUT_ERR:
		ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
		break;
	default:
		ibwc_status = IB_WC_GENERAL_ERR;
		break;
	}
	return ibwc_status;
}
static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
			     u32 wqe_idx)
{
	struct ocrdma_hdr_wqe *hdr;
	struct ocrdma_sge *rw;
	int opcode;

	hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
	ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;

	/* Undo the hdr->cw swap */
	opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
	switch (opcode) {
	case OCRDMA_WRITE:
		ibwc->opcode = IB_WC_RDMA_WRITE;
		break;
	case OCRDMA_READ:
		rw = (struct ocrdma_sge *)(hdr + 1);
		ibwc->opcode = IB_WC_RDMA_READ;
		ibwc->byte_len = rw->len;
		break;
	case OCRDMA_SEND:
		ibwc->opcode = IB_WC_SEND;
		break;
	case OCRDMA_FR_MR:
		ibwc->opcode = IB_WC_REG_MR;
		break;
	case OCRDMA_LKEY_INV:
		ibwc->opcode = IB_WC_LOCAL_INV;
		break;
	default:
		ibwc->status = IB_WC_GENERAL_ERR;
		pr_err("%s() invalid opcode received = 0x%x\n",
		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
		break;
	}
}
static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
					  struct ocrdma_cqe *cqe)
{
	if (is_cqe_for_sq(cqe)) {
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) &
				~OCRDMA_CQE_STATUS_MASK);
		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
				cqe->flags_status_srcqpn) |
				(OCRDMA_CQE_WR_FLUSH_ERR <<
					OCRDMA_CQE_STATUS_SHIFT));
	} else {
		if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_UD_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_UD_STATUS_SHIFT));
		} else {
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) &
					~OCRDMA_CQE_STATUS_MASK);
			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
					cqe->flags_status_srcqpn) |
					(OCRDMA_CQE_WR_FLUSH_ERR <<
						OCRDMA_CQE_STATUS_SHIFT));
		}
	}
}
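
/*
 * The paired cpu_to_le32(le32_to_cpu(...)) expressions above are
 * read-modify-write updates of a little-endian cqe field: load and
 * byte-swap, clear the status bits, OR in WR_FLUSH_ERR at the right
 * shift, then swap back. A hypothetical helper (not part of this driver)
 * factoring out the pattern could look like:
 *
 *	static inline void ocrdma_cqe_set_flushed(u32 *field, u32 mask,
 *						  u32 shift)
 *	{
 *		u32 v = le32_to_cpu(*field) & ~mask;
 *
 *		*field = cpu_to_le32(v |
 *				(OCRDMA_CQE_WR_FLUSH_ERR << shift));
 *	}
 */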
static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	bool expand = false;

	ibwc->byte_len = 0;
	ibwc->qp = &qp->ibqp;
	ibwc->status = ocrdma_to_ibwc_err(status);

	ocrdma_flush_qp(qp);
	ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);

	/* if a wqe/rqe is still pending for which a cqe needs to be
	 * returned, trigger expanding this cqe into flush completions.
	 */
	if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
		expand = true;
		ocrdma_set_cqe_status_flushed(qp, cqe);
	}
	return expand;
}
static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ibwc->opcode = IB_WC_RECV;
	ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
	ocrdma_hwq_inc_tail(&qp->rq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}

static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
				  struct ocrdma_qp *qp, int status)
{
	ocrdma_update_wc(qp, ibwc, qp->sq.tail);
	ocrdma_hwq_inc_tail(&qp->sq);

	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
}
static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
				 bool *polled, bool *stop)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when the hw sq is empty but the rq is not, keep the cqe
	 * so that we get the cq event again.
	 */
	if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
		/* when the cq for rq and sq is the same, it is safe to
		 * return a flush cqe for RQEs.
		 */
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
		} else {
			/* stop processing further cqes, as this cqe is used
			 * for triggering the cq event on the buddy cq of the
			 * RQ. When the QP is destroyed, this cqe will be
			 * removed from the cq's hardware q.
			 */
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_sq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
	}
	return expand;
}
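
/*
 * Summary of the out-parameters used by the poll helpers above and below
 * (added commentary, not from the original source): 'polled' tells the
 * caller that an ib_wc slot was filled and consumed; 'expand' tells it to
 * run the same hardware cqe again so flush completions can be synthesized
 * for every pending wqe/rqe behind it; 'stop' parks the cqe untouched so
 * the buddy cq still sees its event. ocrdma_poll_hwcq() drives this state
 * machine via the skip_cqe/expand_cqe/stop_cqe labels.
 */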
static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe,
				     struct ib_wc *ibwc, bool *polled)
{
	bool expand = false;
	int tail = qp->sq.tail;
	u32 wqe_idx;

	if (!qp->wqe_wr_id_tbl[tail].signaled) {
		*polled = false;    /* WC cannot be consumed yet */
	} else {
		ibwc->status = IB_WC_SUCCESS;
		ibwc->wc_flags = 0;
		ibwc->qp = &qp->ibqp;
		ocrdma_update_wc(qp, ibwc, tail);
		*polled = true;
	}
	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
			OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
	if (tail != wqe_idx)
		expand = true; /* Coalesced CQE can't be consumed yet */

	ocrdma_hwq_inc_tail(&qp->sq);
	return expand;
}
static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;

	if (status == OCRDMA_CQE_SUCCESS)
		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
	else
		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
	return expand;
}
static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
				 struct ocrdma_cqe *cqe)
{
	int status;
	u16 hdr_type = 0;

	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
		OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
						OCRDMA_CQE_SRCQP_MASK;
	ibwc->pkey_index = 0;
	ibwc->wc_flags = IB_WC_GRH;
	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			  OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
			  OCRDMA_CQE_UD_XFER_LEN_MASK;

	if (ocrdma_is_udp_encap_supported(dev)) {
		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
			    OCRDMA_CQE_UD_L3TYPE_SHIFT) &
			    OCRDMA_CQE_UD_L3TYPE_MASK;
		ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
		ibwc->network_hdr_type = hdr_type;
	}

	return status;
}
static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
				       struct ocrdma_cqe *cqe,
				       struct ocrdma_qp *qp)
{
	unsigned long flags;
	struct ocrdma_srq *srq;
	u32 wqe_idx;

	srq = get_ocrdma_srq(qp->ibqp.srq);
	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
		OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
	BUG_ON(wqe_idx < 1);

	ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
	spin_lock_irqsave(&srq->q_lock, flags);
	ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
	spin_unlock_irqrestore(&srq->q_lock, flags);
	ocrdma_hwq_inc_tail(&srq->rq);
}
static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
				 struct ib_wc *ibwc, bool *polled, bool *stop,
				 int status)
{
	bool expand;
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

	if (status < OCRDMA_MAX_CQE_ERR)
		atomic_inc(&dev->cqe_err_stats[status]);

	/* when the hw rq is empty but the sq is not, keep the cqe
	 * so that we get the cq event again.
	 */
	if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
		if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
			*polled = true;
			status = OCRDMA_CQE_WR_FLUSH_ERR;
			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
		} else {
			*polled = false;
			*stop = true;
			expand = false;
		}
	} else if (is_hw_rq_empty(qp)) {
		/* Do nothing */
		expand = false;
		*polled = false;
		*stop = false;
	} else {
		*polled = true;
		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
	}
	return expand;
}
static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
{
	struct ocrdma_dev *dev;

	dev = get_ocrdma_dev(qp->ibqp.device);
	ibwc->opcode = IB_WC_RECV;
	ibwc->qp = &qp->ibqp;
	ibwc->status = IB_WC_SUCCESS;

	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
	else
		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);

	if (is_cqe_imm(cqe)) {
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_wr_imm(cqe)) {
		ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
		ibwc->wc_flags |= IB_WC_WITH_IMM;
	} else if (is_cqe_invalidated(cqe)) {
		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
		ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
	}
	if (qp->ibqp.srq) {
		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
	} else {
		ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
		ocrdma_hwq_inc_tail(&qp->rq);
	}
}
static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
			     struct ib_wc *ibwc, bool *polled, bool *stop)
{
	int status;
	bool expand = false;

	ibwc->wc_flags = 0;
	if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_UD_STATUS_MASK) >>
			OCRDMA_CQE_UD_STATUS_SHIFT;
	} else {
		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
			OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
	}

	if (status == OCRDMA_CQE_SUCCESS) {
		*polled = true;
		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
	} else {
		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
					      status);
	}
	return expand;
}
static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
				   u16 cur_getp)
{
	if (cq->phase_change) {
		if (cur_getp == 0)
			cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
	} else {
		/* clear valid bit */
		cqe->flags_status_srcqpn = 0;
	}
}
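
/*
 * Example of the two validity schemes above (added commentary): on
 * phase-change hardware, consumed CQEs are never zeroed; instead the
 * meaning of the valid bit flips each time the consumer index wraps.
 * With max_hw_cqe == 4 and phase == 1, the first lap treats entries whose
 * valid bit matches 1 as new; once cur_getp wraps to 0 the phase becomes
 * 0, so last lap's still-set entries now compare as stale in
 * is_cqe_valid(). Without phase_change, the driver simply clears
 * flags_status_srcqpn so the slot reads as invalid until hardware
 * rewrites it.
 */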
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp; bool polled = false; bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether valid cqe or not */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}
/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* the adapter returns a single error cqe when a qp moves to
		 * the error state, so insert error cqes with wc_status set
		 * to FLUSHED for the pending WQEs and RQEs of each SQ and RQ
		 * that uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}
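
/*
 * Usage sketch (illustrative only): a consumer drains completions through
 * the core ib_poll_cq() wrapper, which dispatches here; 'cq' and
 * 'handle_completion' are hypothetical.
 *
 *	struct ib_wc wc[16];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *	}
 */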
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;

	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long)mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}
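
/*
 * Note (added commentary): ocrdma_set_page() is the per-page callback that
 * ib_sg_to_pages() invokes once for each page-aligned address it extracts
 * from the scatterlist. Returning -ENOMEM stops the walk when the MR's
 * physical-buffer-entry table is full; ib_sg_to_pages() then reports how
 * many SG entries were actually mapped.
 */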
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}