/*
* Broadcom NetXtreme - E RoCE driver .
*
* Copyright ( c ) 2016 - 2017 , Broadcom . All rights reserved . The term
* Broadcom refers to Broadcom Limited and / or its subsidiaries .
*
* This software is available to you under a choice of one of two
* licenses . You may choose to be licensed under the terms of the GNU
* General Public License ( GPL ) Version 2 , available from the file
* COPYING in the main directory of this source tree , or the
* BSD license below :
*
* Redistribution and use in source and binary forms , with or without
* modification , are permitted provided that the following conditions
* are met :
*
* 1. Redistributions of source code must retain the above copyright
* notice , this list of conditions and the following disclaimer .
* 2. Redistributions in binary form must reproduce the above copyright
* notice , this list of conditions and the following disclaimer in
* the documentation and / or other materials provided with the
* distribution .
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ` ` AS IS ' '
* AND ANY EXPRESS OR IMPLIED WARRANTIES , INCLUDING , BUT NOT LIMITED TO ,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED . IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT , INDIRECT , INCIDENTAL , SPECIAL , EXEMPLARY , OR
* CONSEQUENTIAL DAMAGES ( INCLUDING , BUT NOT LIMITED TO , PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES ; LOSS OF USE , DATA , OR PROFITS ; OR
* BUSINESS INTERRUPTION ) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY ,
* WHETHER IN CONTRACT , STRICT LIABILITY , OR TORT ( INCLUDING NEGLIGENCE
* OR OTHERWISE ) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE , EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE .
*
* Description : Slow Path Operators
*/
# define dev_fmt(fmt) "QPLIB: " fmt
# include <linux/interrupt.h>
# include <linux/spinlock.h>
# include <linux/sched.h>
# include <linux/pci.h>
# include "roce_hsi.h"
# include "qplib_res.h"
# include "qplib_rcfw.h"
# include "qplib_sp.h"
/* All-zero GID value; marks unused entries in the SGID table. */
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
/* Device */
2017-06-29 12:28:16 -07:00
2018-01-11 11:52:08 -05:00
static void bnxt_qplib_query_version ( struct bnxt_qplib_rcfw * rcfw ,
char * fw_ver )
{
struct cmdq_query_version req ;
struct creq_query_version_resp resp ;
u16 cmd_flags = 0 ;
int rc = 0 ;
RCFW_CMD_PREP ( req , QUERY_VERSION , cmd_flags ) ;
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req ,
( void * ) & resp , NULL , 0 ) ;
if ( rc )
return ;
fw_ver [ 0 ] = resp . fw_maj ;
fw_ver [ 1 ] = resp . fw_minor ;
fw_ver [ 2 ] = resp . fw_bld ;
fw_ver [ 3 ] = resp . fw_rsvd ;
}
2017-02-10 03:19:33 -08:00
int bnxt_qplib_get_dev_attr ( struct bnxt_qplib_rcfw * rcfw ,
2018-01-11 11:52:07 -05:00
struct bnxt_qplib_dev_attr * attr , bool vf )
2017-02-10 03:19:33 -08:00
{
struct cmdq_query_func req ;
2017-05-22 03:15:31 -07:00
struct creq_query_func_resp resp ;
struct bnxt_qplib_rcfw_sbuf * sbuf ;
2017-02-10 03:19:33 -08:00
struct creq_query_func_resp_sb * sb ;
u16 cmd_flags = 0 ;
u32 temp ;
u8 * tqm_alloc ;
2017-05-22 03:15:31 -07:00
int i , rc = 0 ;
2017-02-10 03:19:33 -08:00
RCFW_CMD_PREP ( req , QUERY_FUNC , cmd_flags ) ;
2017-05-22 03:15:31 -07:00
sbuf = bnxt_qplib_rcfw_alloc_sbuf ( rcfw , sizeof ( * sb ) ) ;
if ( ! sbuf ) {
2017-02-10 03:19:33 -08:00
dev_err ( & rcfw - > pdev - > dev ,
2018-08-10 11:42:46 -07:00
" SP: QUERY_FUNC alloc side buffer failed \n " ) ;
2017-05-22 03:15:31 -07:00
return - ENOMEM ;
2017-02-10 03:19:33 -08:00
}
2017-05-22 03:15:31 -07:00
sb = sbuf - > sb ;
req . resp_size = sizeof ( * sb ) / BNXT_QPLIB_CMDQE_UNITS ;
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req , ( void * ) & resp ,
( void * ) sbuf , 0 ) ;
if ( rc )
goto bail ;
2017-02-10 03:19:33 -08:00
/* Extract the context from the side buffer */
attr - > max_qp = le32_to_cpu ( sb - > max_qp ) ;
2017-06-29 12:28:12 -07:00
/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
2018-01-11 11:52:07 -05:00
if ( ! vf )
attr - > max_qp + = 1 ;
2017-02-10 03:19:33 -08:00
attr - > max_qp_rd_atom =
sb - > max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb - > max_qp_rd_atom ;
attr - > max_qp_init_rd_atom =
sb - > max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb - > max_qp_init_rd_atom ;
attr - > max_qp_wqes = le16_to_cpu ( sb - > max_qp_wr ) ;
2017-06-14 03:26:23 -07:00
/*
* 128 WQEs needs to be reserved for the HW ( 8916 ) . Prevent
* reporting the max number
*/
attr - > max_qp_wqes - = BNXT_QPLIB_RESERVED_QP_WRS ;
2017-02-10 03:19:33 -08:00
attr - > max_qp_sges = sb - > max_sge ;
attr - > max_cq = le32_to_cpu ( sb - > max_cq ) ;
attr - > max_cq_wqes = le32_to_cpu ( sb - > max_cqe ) ;
attr - > max_cq_sges = attr - > max_qp_sges ;
attr - > max_mr = le32_to_cpu ( sb - > max_mr ) ;
attr - > max_mw = le32_to_cpu ( sb - > max_mw ) ;
attr - > max_mr_size = le64_to_cpu ( sb - > max_mr_size ) ;
attr - > max_pd = 64 * 1024 ;
attr - > max_raw_ethy_qp = le32_to_cpu ( sb - > max_raw_eth_qp ) ;
attr - > max_ah = le32_to_cpu ( sb - > max_ah ) ;
attr - > max_fmr = le32_to_cpu ( sb - > max_fmr ) ;
attr - > max_map_per_fmr = sb - > max_map_per_fmr ;
attr - > max_srq = le16_to_cpu ( sb - > max_srq ) ;
attr - > max_srq_wqes = le32_to_cpu ( sb - > max_srq_wr ) - 1 ;
attr - > max_srq_sges = sb - > max_srq_sge ;
attr - > max_pkey = le32_to_cpu ( sb - > max_pkeys ) ;
2018-10-08 03:28:02 -07:00
/*
* Some versions of FW reports more than 0xFFFF .
* Restrict it for now to 0xFFFF to avoid
* reporting trucated value
*/
if ( attr - > max_pkey > 0xFFFF ) {
/* ib_port_attr::pkey_tbl_len is u16 */
attr - > max_pkey = 0xFFFF ;
}
2017-02-10 03:19:33 -08:00
attr - > max_inline_data = le32_to_cpu ( sb - > max_inline_data ) ;
2018-02-26 01:51:38 -08:00
attr - > l2_db_size = ( sb - > l2_db_space_size + 1 ) *
( 0x01 < < RCFW_DBR_BASE_PAGE_SHIFT ) ;
2017-02-10 03:19:33 -08:00
attr - > max_sgid = le32_to_cpu ( sb - > max_gid ) ;
2018-01-11 11:52:08 -05:00
bnxt_qplib_query_version ( rcfw , attr - > fw_ver ) ;
2017-02-10 03:19:33 -08:00
for ( i = 0 ; i < MAX_TQM_ALLOC_REQ / 4 ; i + + ) {
temp = le32_to_cpu ( sb - > tqm_alloc_reqs [ i ] ) ;
tqm_alloc = ( u8 * ) & temp ;
attr - > tqm_alloc_reqs [ i * 4 ] = * tqm_alloc ;
attr - > tqm_alloc_reqs [ i * 4 + 1 ] = * ( + + tqm_alloc ) ;
attr - > tqm_alloc_reqs [ i * 4 + 2 ] = * ( + + tqm_alloc ) ;
attr - > tqm_alloc_reqs [ i * 4 + 3 ] = * ( + + tqm_alloc ) ;
}
2017-05-22 03:15:31 -07:00
2018-03-05 17:36:47 -06:00
attr - > is_atomic = false ;
2017-05-22 03:15:31 -07:00
bail :
bnxt_qplib_rcfw_free_sbuf ( rcfw , sbuf ) ;
return rc ;
2018-01-11 11:52:07 -05:00
}
int bnxt_qplib_set_func_resources ( struct bnxt_qplib_res * res ,
struct bnxt_qplib_rcfw * rcfw ,
struct bnxt_qplib_ctx * ctx )
{
struct cmdq_set_func_resources req ;
struct creq_set_func_resources_resp resp ;
u16 cmd_flags = 0 ;
int rc = 0 ;
RCFW_CMD_PREP ( req , SET_FUNC_RESOURCES , cmd_flags ) ;
req . number_of_qp = cpu_to_le32 ( ctx - > qpc_count ) ;
req . number_of_mrw = cpu_to_le32 ( ctx - > mrw_count ) ;
req . number_of_srq = cpu_to_le32 ( ctx - > srqc_count ) ;
req . number_of_cq = cpu_to_le32 ( ctx - > cq_count ) ;
req . max_qp_per_vf = cpu_to_le32 ( ctx - > vf_res . max_qp_per_vf ) ;
req . max_mrw_per_vf = cpu_to_le32 ( ctx - > vf_res . max_mrw_per_vf ) ;
req . max_srq_per_vf = cpu_to_le32 ( ctx - > vf_res . max_srq_per_vf ) ;
req . max_cq_per_vf = cpu_to_le32 ( ctx - > vf_res . max_cq_per_vf ) ;
req . max_gid_per_vf = cpu_to_le32 ( ctx - > vf_res . max_gid_per_vf ) ;
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req ,
( void * ) & resp ,
NULL , 0 ) ;
if ( rc ) {
2018-08-10 11:42:46 -07:00
dev_err ( & res - > pdev - > dev , " Failed to set function resources \n " ) ;
2018-01-11 11:52:07 -05:00
}
return rc ;
2017-02-10 03:19:33 -08:00
}
/* SGID */
int bnxt_qplib_get_sgid ( struct bnxt_qplib_res * res ,
struct bnxt_qplib_sgid_tbl * sgid_tbl , int index ,
struct bnxt_qplib_gid * gid )
{
2018-07-04 12:57:11 +03:00
if ( index > = sgid_tbl - > max ) {
2017-02-10 03:19:33 -08:00
dev_err ( & res - > pdev - > dev ,
2018-08-10 11:42:46 -07:00
" Index %d exceeded SGID table max (%d) \n " ,
2017-02-10 03:19:33 -08:00
index , sgid_tbl - > max ) ;
return - EINVAL ;
}
memcpy ( gid , & sgid_tbl - > tbl [ index ] , sizeof ( * gid ) ) ;
return 0 ;
}
/**
 * bnxt_qplib_del_sgid() - Remove a GID from the SGID table.
 * @sgid_tbl: SGID table to remove from.
 * @gid:      GID value to look up and delete.
 * @update:   When true, also issue DELETE_GID to the firmware.
 *
 * Looks @gid up by value; if found, optionally tells firmware to drop it,
 * then zeroes the local entry and invalidates its hw_id. A GID that is not
 * present is treated as success (returns 0 after a warning).
 *
 * Return: 0 on success or if the GID was not found, -EINVAL on an invalid
 * table or hw_id, -ENOMEM if the table has no active entries, or the
 * error from bnxt_qplib_rcfw_send_message().
 */
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	/* NOTE(review): if sgid_tbl were NULL, res (derived via container_of)
	 * would be bogus and the dev_err below would dereference it — in
	 * practice callers always pass an embedded table. Verify callers.
	 */
	if (!sgid_tbl) {
		dev_err(&res->pdev->dev, "SGID table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev, "SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return 0;
	}
	/* Remove GID from the SGID table */
	if (update) {
		struct cmdq_delete_gid req;
		struct creq_delete_gid_resp resp;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->vlan[index] = 0;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		"SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	/* unlock */
	return 0;
}
/**
 * bnxt_qplib_add_sgid() - Add a GID to the SGID table.
 * @sgid_tbl: SGID table to add to.
 * @gid:      GID value to add.
 * @smac:     Source MAC address (6 bytes) associated with the GID.
 * @vlan_id:  VLAN ID for the GID, or 0xFFFF when untagged.
 * @update:   When true, also issue ADD_GID to the firmware.
 * @index:    Output: table index the GID was placed at (or already occupies).
 *
 * Scans for a duplicate and for the first free (all-zero) slot in one pass.
 * If @update is set, programs the firmware and records the returned hw_id.
 *
 * Return: 0 on success, -EALREADY if the GID already exists (with *index
 * set), -EINVAL on an invalid table, -ENOMEM if the table is full, or the
 * error from bnxt_qplib_rcfw_send_message().
 */
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
			bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	if (!sgid_tbl) {
		dev_err(&res->pdev->dev, "SGID table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
			dev_dbg(&res->pdev->dev,
				"SGID entry already exist in entry %d!\n", i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct cmdq_add_gid req;
		struct creq_add_gid_resp resp;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);

		/* GID words are sent most-significant first */
		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
		/*
		 * driver should ensure that all RoCE traffic is always VLAN
		 * tagged if RoCE traffic is running on non-zero VLAN ID or
		 * RoCE traffic is running on non-zero Priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16
					(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |= cpu_to_le16
					(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					 CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->active++;
	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = 1;
	dev_dbg(&res->pdev->dev,
		"SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	/* unlock */
	return 0;
}
/**
 * bnxt_qplib_update_sgid() - Modify an existing GID entry in firmware.
 * @sgid_tbl: SGID table the entry belongs to.
 * @gid:      New GID value to program.
 * @gid_idx:  Hardware GID index to modify.
 * @smac:     Source MAC address (6 bytes) associated with the GID.
 *
 * Issues MODIFY_GID; only the firmware state is updated here — the local
 * table copy is not touched by this function.
 *
 * Return: 0 on success or the error from bnxt_qplib_rcfw_send_message().
 */
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp;
	struct cmdq_modify_gid req;
	int rc;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);

	/* GID words are sent most-significant first */
	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return rc;
}
/* pkeys */
/**
 * bnxt_qplib_get_pkey() - Copy one entry out of the PKEY table.
 * @res:      Device resource context (for error reporting).
 * @pkey_tbl: PKEY table to read from.
 * @index:    Table index to read; 0xFFFF is a wildcard that returns 0xFFFF.
 * @pkey:     Output buffer receiving the pkey at @index.
 *
 * Return: 0 on success, -EINVAL if @index is out of range.
 */
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
			u16 *pkey)
{
	if (index == 0xFFFF) {
		*pkey = 0xFFFF;
		return 0;
	}
	if (index >= pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded PKEY table max (%d)\n",
			index, pkey_tbl->max);
		return -EINVAL;
	}
	memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
	return 0;
}
/**
 * bnxt_qplib_del_pkey() - Remove a pkey from the PKEY table.
 * @res:      Device resource context (for error reporting).
 * @pkey_tbl: PKEY table to remove from.
 * @pkey:     Pkey value to look up and delete.
 * @update:   Unused here; kept for symmetry with bnxt_qplib_add_pkey().
 *
 * Return: 0 on success, -EINVAL on an invalid table, -ENOMEM if the table
 * is empty or the pkey is not present.
 */
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update)
{
	int i, rc = 0;

	if (!pkey_tbl) {
		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a pkey_lock here? */
	if (!pkey_tbl->active) {
		dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
		return -ENOMEM;
	}
	for (i = 0; i < pkey_tbl->max; i++) {
		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
			break;
	}
	if (i == pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"PKEY 0x%04x not found in the pkey table\n", *pkey);
		return -ENOMEM;
	}
	memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
	pkey_tbl->active--;

	/* unlock */
	return rc;
}
/**
 * bnxt_qplib_add_pkey() - Add a pkey to the PKEY table.
 * @res:      Device resource context (for error reporting).
 * @pkey_tbl: PKEY table to add to.
 * @pkey:     Pkey value to add.
 * @update:   Unused here; kept for symmetry with bnxt_qplib_del_pkey().
 *
 * Scans for a duplicate and for the first free (zero) slot in one pass.
 *
 * Return: 0 on success, -EALREADY if the pkey already exists, -EINVAL on
 * an invalid table, -ENOMEM if the table is full.
 */
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update)
{
	int i, free_idx, rc = 0;

	if (!pkey_tbl) {
		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
		return -EINVAL;
	}
	/* Do we need a pkey_lock here? */
	if (pkey_tbl->active == pkey_tbl->max) {
		dev_err(&res->pdev->dev, "PKEY table is full\n");
		return -ENOMEM;
	}
	free_idx = pkey_tbl->max;
	for (i = 0; i < pkey_tbl->max; i++) {
		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
			return -EALREADY;
		else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
			free_idx = i;
	}
	if (free_idx == pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"PKEY table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	/* Add PKEY to the pkey_tbl */
	memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
	pkey_tbl->active++;

	/* unlock */
	return rc;
}
/* AH */
int bnxt_qplib_create_ah ( struct bnxt_qplib_res * res , struct bnxt_qplib_ah * ah )
{
struct bnxt_qplib_rcfw * rcfw = res - > rcfw ;
struct cmdq_create_ah req ;
2017-05-22 03:15:31 -07:00
struct creq_create_ah_resp resp ;
2017-02-10 03:19:33 -08:00
u16 cmd_flags = 0 ;
u32 temp32 [ 4 ] ;
u16 temp16 [ 3 ] ;
2017-05-22 03:15:31 -07:00
int rc ;
2017-02-10 03:19:33 -08:00
RCFW_CMD_PREP ( req , CREATE_AH , cmd_flags ) ;
memcpy ( temp32 , ah - > dgid . data , sizeof ( struct bnxt_qplib_gid ) ) ;
req . dgid [ 0 ] = cpu_to_le32 ( temp32 [ 0 ] ) ;
req . dgid [ 1 ] = cpu_to_le32 ( temp32 [ 1 ] ) ;
req . dgid [ 2 ] = cpu_to_le32 ( temp32 [ 2 ] ) ;
req . dgid [ 3 ] = cpu_to_le32 ( temp32 [ 3 ] ) ;
req . type = ah - > nw_type ;
req . hop_limit = ah - > hop_limit ;
req . sgid_index = cpu_to_le16 ( res - > sgid_tbl . hw_id [ ah - > sgid_index ] ) ;
req . dest_vlan_id_flow_label = cpu_to_le32 ( ( ah - > flow_label &
CMDQ_CREATE_AH_FLOW_LABEL_MASK ) |
CMDQ_CREATE_AH_DEST_VLAN_ID_MASK ) ;
req . pd_id = cpu_to_le32 ( ah - > pd - > id ) ;
req . traffic_class = ah - > traffic_class ;
/* MAC in network format */
memcpy ( temp16 , ah - > dmac , 6 ) ;
req . dest_mac [ 0 ] = cpu_to_le16 ( temp16 [ 0 ] ) ;
req . dest_mac [ 1 ] = cpu_to_le16 ( temp16 [ 1 ] ) ;
req . dest_mac [ 2 ] = cpu_to_le16 ( temp16 [ 2 ] ) ;
2017-05-22 03:15:31 -07:00
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req , ( void * ) & resp ,
NULL , 1 ) ;
if ( rc )
return rc ;
ah - > id = le32_to_cpu ( resp . xid ) ;
2017-02-10 03:19:33 -08:00
return 0 ;
}
int bnxt_qplib_destroy_ah ( struct bnxt_qplib_res * res , struct bnxt_qplib_ah * ah )
{
struct bnxt_qplib_rcfw * rcfw = res - > rcfw ;
struct cmdq_destroy_ah req ;
2017-05-22 03:15:31 -07:00
struct creq_destroy_ah_resp resp ;
2017-02-10 03:19:33 -08:00
u16 cmd_flags = 0 ;
2017-05-22 03:15:31 -07:00
int rc ;
2017-02-10 03:19:33 -08:00
/* Clean up the AH table in the device */
RCFW_CMD_PREP ( req , DESTROY_AH , cmd_flags ) ;
req . ah_cid = cpu_to_le32 ( ah - > id ) ;
2017-05-22 03:15:31 -07:00
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req , ( void * ) & resp ,
NULL , 1 ) ;
if ( rc )
return rc ;
2017-02-10 03:19:33 -08:00
return 0 ;
}
/* MRW */
int bnxt_qplib_free_mrw ( struct bnxt_qplib_res * res , struct bnxt_qplib_mrw * mrw )
{
struct bnxt_qplib_rcfw * rcfw = res - > rcfw ;
struct cmdq_deallocate_key req ;
2017-05-22 03:15:31 -07:00
struct creq_deallocate_key_resp resp ;
2017-02-10 03:19:33 -08:00
u16 cmd_flags = 0 ;
2017-05-22 03:15:31 -07:00
int rc ;
2017-02-10 03:19:33 -08:00
if ( mrw - > lkey = = 0xFFFFFFFF ) {
2018-08-10 11:42:46 -07:00
dev_info ( & res - > pdev - > dev , " SP: Free a reserved lkey MRW \n " ) ;
2017-02-10 03:19:33 -08:00
return 0 ;
}
RCFW_CMD_PREP ( req , DEALLOCATE_KEY , cmd_flags ) ;
req . mrw_flags = mrw - > type ;
if ( ( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 ) | |
( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ) | |
( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B ) )
req . key = cpu_to_le32 ( mrw - > rkey ) ;
else
req . key = cpu_to_le32 ( mrw - > lkey ) ;
2017-05-22 03:15:31 -07:00
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req , ( void * ) & resp ,
NULL , 0 ) ;
if ( rc )
return rc ;
2017-02-10 03:19:33 -08:00
/* Free the qplib's MRW memory */
if ( mrw - > hwq . max_elements )
bnxt_qplib_free_hwq ( res - > pdev , & mrw - > hwq ) ;
return 0 ;
}
int bnxt_qplib_alloc_mrw ( struct bnxt_qplib_res * res , struct bnxt_qplib_mrw * mrw )
{
struct bnxt_qplib_rcfw * rcfw = res - > rcfw ;
struct cmdq_allocate_mrw req ;
2017-05-22 03:15:31 -07:00
struct creq_allocate_mrw_resp resp ;
2017-02-10 03:19:33 -08:00
u16 cmd_flags = 0 ;
unsigned long tmp ;
2017-05-22 03:15:31 -07:00
int rc ;
2017-02-10 03:19:33 -08:00
RCFW_CMD_PREP ( req , ALLOCATE_MRW , cmd_flags ) ;
req . pd_id = cpu_to_le32 ( mrw - > pd - > id ) ;
req . mrw_flags = mrw - > type ;
if ( ( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR & &
mrw - > flags & BNXT_QPLIB_FR_PMR ) | |
mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A | |
mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B )
req . access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY ;
tmp = ( unsigned long ) mrw ;
req . mrw_handle = cpu_to_le64 ( tmp ) ;
2017-05-22 03:15:31 -07:00
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req ,
( void * ) & resp , NULL , 0 ) ;
if ( rc )
return rc ;
2017-02-10 03:19:33 -08:00
if ( ( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 ) | |
( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ) | |
( mrw - > type = = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B ) )
2017-05-22 03:15:31 -07:00
mrw - > rkey = le32_to_cpu ( resp . xid ) ;
2017-02-10 03:19:33 -08:00
else
2017-05-22 03:15:31 -07:00
mrw - > lkey = le32_to_cpu ( resp . xid ) ;
2017-02-10 03:19:33 -08:00
return 0 ;
}
/**
 * bnxt_qplib_dereg_mrw() - Deregister an MR in firmware.
 * @res:   Device resource context (provides the RCFW channel).
 * @mrw:   Memory region to deregister (identified by mrw->lkey).
 * @block: Passed through to the RCFW send as its blocking flag.
 *
 * On success, resets the MR's va/size and frees its backing HWQ if any.
 *
 * Return: 0 on success or the error from bnxt_qplib_rcfw_send_message().
 */
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deregister_mr req;
	struct creq_deregister_mr_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);

	req.lkey = cpu_to_le32(mrw->lkey);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res->pdev, &mrw->hwq);
	}

	return 0;
}
int bnxt_qplib_reg_mr ( struct bnxt_qplib_res * res , struct bnxt_qplib_mrw * mr ,
2018-01-11 11:52:09 -05:00
u64 * pbl_tbl , int num_pbls , bool block , u32 buf_pg_size )
2017-02-10 03:19:33 -08:00
{
struct bnxt_qplib_rcfw * rcfw = res - > rcfw ;
struct cmdq_register_mr req ;
2017-05-22 03:15:31 -07:00
struct creq_register_mr_resp resp ;
2017-02-10 03:19:33 -08:00
u16 cmd_flags = 0 , level ;
int pg_ptrs , pages , i , rc ;
dma_addr_t * * pbl_ptr ;
u32 pg_size ;
if ( num_pbls ) {
2018-01-11 11:52:09 -05:00
/* Allocate memory for the non-leaf pages to store buf ptrs.
* Non - leaf pages always uses system PAGE_SIZE
*/
2017-02-10 03:19:33 -08:00
pg_ptrs = roundup_pow_of_two ( num_pbls ) ;
pages = pg_ptrs > > MAX_PBL_LVL_1_PGS_SHIFT ;
if ( ! pages )
pages + + ;
if ( pages > MAX_PBL_LVL_1_PGS ) {
dev_err ( & res - > pdev - > dev ,
2018-08-10 11:42:46 -07:00
" SP: Reg MR pages requested (0x%x) exceeded max (0x%x) \n " ,
2017-02-10 03:19:33 -08:00
pages , MAX_PBL_LVL_1_PGS ) ;
return - ENOMEM ;
}
/* Free the hwq if it already exist, must be a rereg */
if ( mr - > hwq . max_elements )
bnxt_qplib_free_hwq ( res - > pdev , & mr - > hwq ) ;
mr - > hwq . max_elements = pages ;
2018-01-11 11:52:09 -05:00
/* Use system PAGE_SIZE */
2017-02-10 03:19:33 -08:00
rc = bnxt_qplib_alloc_init_hwq ( res - > pdev , & mr - > hwq , NULL , 0 ,
& mr - > hwq . max_elements ,
PAGE_SIZE , 0 , PAGE_SIZE ,
HWQ_TYPE_CTX ) ;
if ( rc ) {
dev_err ( & res - > pdev - > dev ,
2018-08-10 11:42:46 -07:00
" SP: Reg MR memory allocation failed \n " ) ;
2017-02-10 03:19:33 -08:00
return - ENOMEM ;
}
/* Write to the hwq */
pbl_ptr = ( dma_addr_t * * ) mr - > hwq . pbl_ptr ;
for ( i = 0 ; i < num_pbls ; i + + )
pbl_ptr [ PTR_PG ( i ) ] [ PTR_IDX ( i ) ] =
( pbl_tbl [ i ] & PAGE_MASK ) | PTU_PTE_VALID ;
}
RCFW_CMD_PREP ( req , REGISTER_MR , cmd_flags ) ;
/* Configure the request */
if ( mr - > hwq . level = = PBL_LVL_MAX ) {
2018-01-11 11:52:09 -05:00
/* No PBL provided, just use system PAGE_SIZE */
2017-02-10 03:19:33 -08:00
level = 0 ;
req . pbl = 0 ;
pg_size = PAGE_SIZE ;
} else {
level = mr - > hwq . level + 1 ;
req . pbl = cpu_to_le64 ( mr - > hwq . pbl [ PBL_LVL_0 ] . pg_map_arr [ 0 ] ) ;
}
2018-01-11 11:52:09 -05:00
pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE ;
2017-02-10 03:19:33 -08:00
req . log2_pg_size_lvl = ( level < < CMDQ_REGISTER_MR_LVL_SFT ) |
( ( ilog2 ( pg_size ) < <
CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT ) &
CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK ) ;
2018-01-11 11:52:09 -05:00
req . log2_pbl_pg_size = cpu_to_le16 ( ( ( ilog2 ( PAGE_SIZE ) < <
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT ) &
CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK ) ) ;
2017-02-10 03:19:33 -08:00
req . access = ( mr - > flags & 0xFFFF ) ;
req . va = cpu_to_le64 ( mr - > va ) ;
req . key = cpu_to_le32 ( mr - > lkey ) ;
req . mr_size = cpu_to_le64 ( mr - > total_size ) ;
2017-05-22 03:15:31 -07:00
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req ,
( void * ) & resp , NULL , block ) ;
if ( rc )
2017-02-10 03:19:33 -08:00
goto fail ;
2017-05-22 03:15:31 -07:00
2017-02-10 03:19:33 -08:00
return 0 ;
fail :
if ( mr - > hwq . max_elements )
bnxt_qplib_free_hwq ( res - > pdev , & mr - > hwq ) ;
return rc ;
}
int bnxt_qplib_alloc_fast_reg_page_list ( struct bnxt_qplib_res * res ,
struct bnxt_qplib_frpl * frpl ,
int max_pg_ptrs )
{
int pg_ptrs , pages , rc ;
/* Re-calculate the max to fit the HWQ allocation model */
pg_ptrs = roundup_pow_of_two ( max_pg_ptrs ) ;
pages = pg_ptrs > > MAX_PBL_LVL_1_PGS_SHIFT ;
if ( ! pages )
pages + + ;
if ( pages > MAX_PBL_LVL_1_PGS )
return - ENOMEM ;
frpl - > hwq . max_elements = pages ;
rc = bnxt_qplib_alloc_init_hwq ( res - > pdev , & frpl - > hwq , NULL , 0 ,
& frpl - > hwq . max_elements , PAGE_SIZE , 0 ,
PAGE_SIZE , HWQ_TYPE_CTX ) ;
if ( ! rc )
frpl - > max_pg_ptrs = pg_ptrs ;
return rc ;
}
int bnxt_qplib_free_fast_reg_page_list ( struct bnxt_qplib_res * res ,
struct bnxt_qplib_frpl * frpl )
{
bnxt_qplib_free_hwq ( res - > pdev , & frpl - > hwq ) ;
return 0 ;
}
/**
 * bnxt_qplib_map_tc2cos() - Program the TC-to-CoS mapping in firmware.
 * @res:  Device resource context (provides the RCFW channel).
 * @cids: Array of at least two CoS queue IDs (cos0, cos1).
 *
 * The send_message result is deliberately ignored — presumably a failed
 * mapping is non-fatal here; confirm against callers before changing.
 *
 * Return: always 0.
 */
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_map_tc_to_cos req;
	struct creq_map_tc_to_cos_resp resp;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);

	req.cos0 = cpu_to_le16(cids[0]);
	req.cos1 = cpu_to_le16(cids[1]);

	bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
				     0);
	return 0;
}
int bnxt_qplib_get_roce_stats ( struct bnxt_qplib_rcfw * rcfw ,
struct bnxt_qplib_roce_stats * stats )
{
struct cmdq_query_roce_stats req ;
struct creq_query_roce_stats_resp resp ;
struct bnxt_qplib_rcfw_sbuf * sbuf ;
struct creq_query_roce_stats_resp_sb * sb ;
u16 cmd_flags = 0 ;
int rc = 0 ;
RCFW_CMD_PREP ( req , QUERY_ROCE_STATS , cmd_flags ) ;
sbuf = bnxt_qplib_rcfw_alloc_sbuf ( rcfw , sizeof ( * sb ) ) ;
if ( ! sbuf ) {
dev_err ( & rcfw - > pdev - > dev ,
2018-08-10 11:42:46 -07:00
" SP: QUERY_ROCE_STATS alloc side buffer failed \n " ) ;
2018-01-11 11:52:10 -05:00
return - ENOMEM ;
}
sb = sbuf - > sb ;
req . resp_size = sizeof ( * sb ) / BNXT_QPLIB_CMDQE_UNITS ;
rc = bnxt_qplib_rcfw_send_message ( rcfw , ( void * ) & req , ( void * ) & resp ,
( void * ) sbuf , 0 ) ;
if ( rc )
goto bail ;
/* Extract the context from the side buffer */
stats - > to_retransmits = le64_to_cpu ( sb - > to_retransmits ) ;
stats - > seq_err_naks_rcvd = le64_to_cpu ( sb - > seq_err_naks_rcvd ) ;
stats - > max_retry_exceeded = le64_to_cpu ( sb - > max_retry_exceeded ) ;
stats - > rnr_naks_rcvd = le64_to_cpu ( sb - > rnr_naks_rcvd ) ;
stats - > missing_resp = le64_to_cpu ( sb - > missing_resp ) ;
stats - > unrecoverable_err = le64_to_cpu ( sb - > unrecoverable_err ) ;
stats - > bad_resp_err = le64_to_cpu ( sb - > bad_resp_err ) ;
stats - > local_qp_op_err = le64_to_cpu ( sb - > local_qp_op_err ) ;
stats - > local_protection_err = le64_to_cpu ( sb - > local_protection_err ) ;
stats - > mem_mgmt_op_err = le64_to_cpu ( sb - > mem_mgmt_op_err ) ;
stats - > remote_invalid_req_err = le64_to_cpu ( sb - > remote_invalid_req_err ) ;
stats - > remote_access_err = le64_to_cpu ( sb - > remote_access_err ) ;
stats - > remote_op_err = le64_to_cpu ( sb - > remote_op_err ) ;
stats - > dup_req = le64_to_cpu ( sb - > dup_req ) ;
stats - > res_exceed_max = le64_to_cpu ( sb - > res_exceed_max ) ;
stats - > res_length_mismatch = le64_to_cpu ( sb - > res_length_mismatch ) ;
stats - > res_exceeds_wqe = le64_to_cpu ( sb - > res_exceeds_wqe ) ;
stats - > res_opcode_err = le64_to_cpu ( sb - > res_opcode_err ) ;
stats - > res_rx_invalid_rkey = le64_to_cpu ( sb - > res_rx_invalid_rkey ) ;
stats - > res_rx_domain_err = le64_to_cpu ( sb - > res_rx_domain_err ) ;
stats - > res_rx_no_perm = le64_to_cpu ( sb - > res_rx_no_perm ) ;
stats - > res_rx_range_err = le64_to_cpu ( sb - > res_rx_range_err ) ;
stats - > res_tx_invalid_rkey = le64_to_cpu ( sb - > res_tx_invalid_rkey ) ;
stats - > res_tx_domain_err = le64_to_cpu ( sb - > res_tx_domain_err ) ;
stats - > res_tx_no_perm = le64_to_cpu ( sb - > res_tx_no_perm ) ;
stats - > res_tx_range_err = le64_to_cpu ( sb - > res_tx_range_err ) ;
stats - > res_irrq_oflow = le64_to_cpu ( sb - > res_irrq_oflow ) ;
stats - > res_unsup_opcode = le64_to_cpu ( sb - > res_unsup_opcode ) ;
stats - > res_unaligned_atomic = le64_to_cpu ( sb - > res_unaligned_atomic ) ;
stats - > res_rem_inv_err = le64_to_cpu ( sb - > res_rem_inv_err ) ;
stats - > res_mem_error = le64_to_cpu ( sb - > res_mem_error ) ;
stats - > res_srq_err = le64_to_cpu ( sb - > res_srq_err ) ;
stats - > res_cmp_err = le64_to_cpu ( sb - > res_cmp_err ) ;
stats - > res_invalid_dup_rkey = le64_to_cpu ( sb - > res_invalid_dup_rkey ) ;
stats - > res_wqe_format_err = le64_to_cpu ( sb - > res_wqe_format_err ) ;
stats - > res_cq_load_err = le64_to_cpu ( sb - > res_cq_load_err ) ;
stats - > res_srq_load_err = le64_to_cpu ( sb - > res_srq_load_err ) ;
stats - > res_tx_pci_err = le64_to_cpu ( sb - > res_tx_pci_err ) ;
stats - > res_rx_pci_err = le64_to_cpu ( sb - > res_rx_pci_err ) ;
2018-10-08 03:28:00 -07:00
if ( ! rcfw - > init_oos_stats ) {
rcfw - > oos_prev = le64_to_cpu ( sb - > res_oos_drop_count ) ;
rcfw - > init_oos_stats = 1 ;
} else {
stats - > res_oos_drop_count + =
( le64_to_cpu ( sb - > res_oos_drop_count ) -
rcfw - > oos_prev ) & BNXT_QPLIB_OOS_COUNT_MASK ;
rcfw - > oos_prev = le64_to_cpu ( sb - > res_oos_drop_count ) ;
}
2018-01-11 11:52:10 -05:00
bail :
bnxt_qplib_rcfw_free_sbuf ( rcfw , sbuf ) ;
return rc ;
}