/*
 * Copyright (c) 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include "user.h"

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
        struct ib_event event;
        struct ib_cq *ibcq;

        if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
                printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
                       "on CQ %06x\n", type, cq->cqn);
                return;
        }

        ibcq = &to_mibcq(cq)->ibcq;
        if (ibcq->event_handler) {
                event.device     = ibcq->device;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
        return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n);
}
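
/*
 * A CQE at index n belongs to software when its ownership bit matches the
 * parity of the current pass around the ring: the ring size
 * (cq->ibcq.cqe + 1) is a power of two, so n & (cq->ibcq.cqe + 1) extracts
 * that parity bit.  If the two disagree, the entry still belongs to
 * hardware and NULL is returned.
 */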
static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
        struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);

        return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}
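
/*
 * Tune completion event moderation: roughly, the HCA coalesces completion
 * events until either cq_count completions have occurred or cq_period
 * microseconds have elapsed (see mlx4_cq_modify() for the exact firmware
 * semantics).
 */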
int mlx4_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        struct mlx4_ib_cq *mcq = to_mcq(cq);
        struct mlx4_ib_dev *dev = to_mdev(cq->device);

        return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);
}
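
/*
 * Kernel CQ buffers are set up in three steps: allocate the buffer itself,
 * reserve an MTT range describing its pages, and write the page list into
 * the MTT so the HCA can translate accesses to the ring.
 */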
static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
        int err;

        err = mlx4_buf_alloc(dev->dev, nent * sizeof(struct mlx4_cqe),
                             PAGE_SIZE * 2, &buf->buf);
        if (err)
                goto out;

        err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
                            &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        mlx4_buf_free(dev->dev, nent * sizeof(struct mlx4_cqe),
                      &buf->buf);

out:
        return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
        mlx4_buf_free(dev->dev, (cqe + 1) * sizeof(struct mlx4_cqe), &buf->buf);
}
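
/*
 * For userspace CQs the ring lives in user memory: pin it with
 * ib_umem_get(), then build and write an MTT from the resulting page list,
 * mirroring what mlx4_ib_alloc_cq_buf() does for kernel buffers.
 */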
static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
                               struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
                               u64 buf_addr, int cqe)
{
        int err;

        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
                            IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);

        err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),
                            ilog2((*umem)->page_size), &buf->mtt);
        if (err)
                goto err_buf;

        err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
        if (err)
                goto err_mtt;

        return 0;

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
        ib_umem_release(*umem);

        return err;
}

struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector,
                                struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_cq *cq;
        struct mlx4_uar *uar;
        int err;

        if (entries < 1 || entries > dev->dev->caps.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);
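
        /*
         * The CQE ring must be a power of two in size, and one slot beyond
         * what the consumer asked for is set aside, so round up
         * entries + 1 and report entries - 1 usable CQEs via ibcq.cqe.
         */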
        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;

        if (context) {
                struct mlx4_ib_create_cq ucmd;

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_cq;
                }

                err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
                                          ucmd.buf_addr, entries);
                if (err)
                        goto err_cq;

                err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                          &cq->db);
                if (err)
                        goto err_mtt;

                uar = &to_mucontext(context)->uar;
        } else {
                err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;
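
                /*
                 * The kernel doorbell record is a pair of 32-bit words:
                 * word 0 holds the consumer index (set_ci) and word 1 the
                 * arm state.  Clear both before the CQ is used.
                 */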
                cq->mcq.set_ci_db  = cq->db.db;
                cq->mcq.arm_db     = cq->db.db + 1;
                *cq->mcq.set_ci_db = 0;
                *cq->mcq.arm_db    = 0;

                err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
                if (err)
                        goto err_db;

                uar = &dev->priv_uar;
        }

        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
                            cq->db.dma, &cq->mcq, 0);
        if (err)
                goto err_dbmap;

        cq->mcq.comp  = mlx4_ib_cq_comp;
        cq->mcq.event = mlx4_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
                        err = -EFAULT;
                        goto err_dbmap;
                }

        return &cq->ibcq;

err_dbmap:
        if (context)
                mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
        mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

        if (context)
                ib_umem_release(cq->umem);
        else
                mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
        if (!context)
                mlx4_db_free(dev->dev, &cq->db);

err_cq:
        kfree(cq);

        return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                 int entries)
{
        int err;

        if (cq->resize_buf)
                return -EBUSY;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
                                  int entries, struct ib_udata *udata)
{
        struct mlx4_ib_resize_cq ucmd;
        int err;

        if (cq->resize_umem)
                return -EBUSY;

        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return -EFAULT;

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf)
                return -ENOMEM;

        err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
                                  &cq->resize_umem, ucmd.buf_addr, entries);
        if (err) {
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                return err;
        }

        cq->resize_buf->cqe = entries - 1;

        return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
        u32 i;

        i = cq->mcq.cons_index;
        while (get_sw_cqe(cq, i & cq->ibcq.cqe))
                ++i;

        return i - cq->mcq.cons_index;
}
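
/*
 * Migrate the CQEs that software has not yet polled from the old buffer
 * into the resize buffer, stopping at the special RESIZE CQE written into
 * the old ring when the resize takes effect; the consumer index is then
 * advanced past that marker.
 */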
static void mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
        struct mlx4_cqe *cqe;
        int i;

        i = cq->mcq.cons_index;
        cqe = get_cqe(cq, i & cq->ibcq.cqe);
        while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
                memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
                                        (i + 1) & cq->resize_buf->cqe),
                       get_cqe(cq, i & cq->ibcq.cqe), sizeof(struct mlx4_cqe));
                cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
        }
        ++cq->mcq.cons_index;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        int outst_cqe;
        int err;

        mutex_lock(&cq->resize_mutex);

        if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                err = 0;
                goto out;
        }

        if (ibcq->uobject) {
                err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
                if (err)
                        goto out;
        } else {
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
                        err = 0;
                        goto out;
                }

                err = mlx4_alloc_resize_buf(dev, cq, entries);
                if (err)
                        goto out;
        }

        err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
        if (err)
                goto err_buf;

        if (ibcq->uobject) {
                cq->buf      = cq->resize_buf->buf;
                cq->ibcq.cqe = cq->resize_buf->cqe;
                ib_umem_release(cq->umem);
                cq->umem     = cq->resize_umem;

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                cq->resize_umem = NULL;
        } else {
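                /*
                 * The poll path may already have seen the RESIZE CQE and
                 * switched to the new buffer itself (see mlx4_ib_poll_one()),
                 * so take the CQ lock and only migrate entries if resize_buf
                 * is still pending.
                 */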
                spin_lock_irq(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
                spin_unlock_irq(&cq->lock);
        }

        goto out;

err_buf:
        if (!ibcq->uobject)
                mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
                                    cq->resize_buf->cqe);

        kfree(cq->resize_buf);
        cq->resize_buf = NULL;

        if (cq->resize_umem) {
                ib_umem_release(cq->resize_umem);
                cq->resize_umem = NULL;
        }

out:
        mutex_unlock(&cq->resize_mutex);

        return err;
}

int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx4_ib_dev *dev = to_mdev(cq->device);
        struct mlx4_ib_cq *mcq = to_mcq(cq);

        mlx4_cq_free(dev->dev, &mcq->mcq);
        mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

        if (cq->uobject) {
                mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
                mlx4_db_free(dev->dev, &mcq->db);
        }

        kfree(mcq);

        return 0;
}

static void dump_cqe(void *cqe)
{
        __be32 *buf = cqe;

        printk(KERN_DEBUG "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
               be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
               be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
               be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
                                     struct ib_wc *wc)
{
        if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
                printk(KERN_DEBUG "local QP operation err "
                       "(QPN %06x, WQE index %x, vendor syndrome %02x, "
                       "opcode = %02x)\n",
                       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
                       cqe->vendor_err_syndrome,
                       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                dump_cqe(cqe);
        }

        switch (cqe->syndrome) {
        case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX4_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                break;
        case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_syndrome;
}
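
/*
 * Receive checksum offload status for IPoIB: report the checksum as good
 * only for a non-fragmented, option-free IPv4 packet (not IPv6) that
 * carries TCP or UDP and whose hardware-computed checksum is 0xffff.
 */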
static int mlx4_ib_ipoib_csum_ok(__be32 status, __be16 checksum)
{
        return ((status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
                                      MLX4_CQE_IPOIB_STATUS_IPV4F |
                                      MLX4_CQE_IPOIB_STATUS_IPV4OPT |
                                      MLX4_CQE_IPOIB_STATUS_IPV6 |
                                      MLX4_CQE_IPOIB_STATUS_IPOK)) ==
                cpu_to_be32(MLX4_CQE_IPOIB_STATUS_IPV4 |
                            MLX4_CQE_IPOIB_STATUS_IPOK)) &&
                (status & cpu_to_be32(MLX4_CQE_IPOIB_STATUS_UDP |
                                      MLX4_CQE_IPOIB_STATUS_TCP)) &&
                checksum == cpu_to_be16(0xffff);
}

static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
                            struct mlx4_ib_qp **cur_qp,
                            struct ib_wc *wc)
{
        struct mlx4_cqe *cqe;
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
        int is_send;
        int is_error;
        u32 g_mlpath_rqpn;
        u16 wqe_ctr;

repoll:
        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        ++cq->mcq.cons_index;

        /*
         * Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
        is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                MLX4_CQE_OPCODE_ERROR;

        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
                     is_send)) {
                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
                return -EINVAL;
        }

        /* Resize CQ in progress */
        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
                if (cq->resize_buf) {
                        struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

                        mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;

                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }

                goto repoll;
        }

        if (!*cur_qp ||
            (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
                /*
                 * We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
                                       be32_to_cpu(cqe->my_qpn));
                if (unlikely(!mqp)) {
                        printk(KERN_WARNING "CQ %06x with entry for unknown QPN %06x\n",
                               cq->mcq.cqn, be32_to_cpu(cqe->my_qpn) & 0xffffff);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;

        if (is_send) {
                wq = &(*cur_qp)->sq;
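                /*
                 * With selective signaling, send WQEs are fixed-size and
                 * the CQE's wqe_index can be used to resynchronize wq->tail;
                 * otherwise every WR is signaled, WQEs may have been shrunk
                 * to variable sizes, and completions arrive strictly in
                 * order, so the ++wq->tail below is sufficient.
                 */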
                if (!(*cur_qp)->sq_signal_bits) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
                }
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        } else if ((*cur_qp)->ibqp.srq) {
                srq = to_msrq((*cur_qp)->ibqp.srq);
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq = &(*cur_qp)->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }

        if (unlikely(is_error)) {
                mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
                return 0;
        }

        wc->status = IB_WC_SUCCESS;

        if (is_send) {
                wc->wc_flags = 0;
                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_OPCODE_RDMA_WRITE_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case MLX4_OPCODE_SEND_IMM:
                        wc->wc_flags |= IB_WC_WITH_IMM;
                case MLX4_OPCODE_SEND:
                        wc->opcode = IB_WC_SEND;
                        break;
                case MLX4_OPCODE_RDMA_READ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
                        break;
                case MLX4_OPCODE_ATOMIC_CS:
                        wc->opcode = IB_WC_COMP_SWAP;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_ATOMIC_FA:
                        wc->opcode = IB_WC_FETCH_ADD;
                        wc->byte_len = 8;
                        break;
                case MLX4_OPCODE_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case MLX4_OPCODE_LSO:
                        wc->opcode = IB_WC_LSO;
                        break;
                }
        } else {
                wc->byte_len = be32_to_cpu(cqe->byte_cnt);

                switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
                case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
                        wc->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->imm_data = cqe->immed_rss_invalid;
                        break;
                case MLX4_RECV_OPCODE_SEND:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = 0;
                        break;
                case MLX4_RECV_OPCODE_SEND_IMM:
                        wc->opcode   = IB_WC_RECV;
                        wc->wc_flags = IB_WC_WITH_IMM;
                        wc->imm_data = cqe->immed_rss_invalid;
                        break;
                }

                wc->slid = be16_to_cpu(cqe->rlid);
                wc->sl = cqe->sl >> 4;
                g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
                wc->src_qp = g_mlpath_rqpn & 0xffffff;
                wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
                wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
                wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
                wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->ipoib_status,
                                                    cqe->checksum);
        }

        return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx4_ib_cq *cq = to_mcq(ibcq);
        struct mlx4_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled) {
                err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        if (npolled)
                mlx4_cq_set_ci(&cq->mcq);

        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx4_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->uar_map,
                    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

        return 0;
}
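
/*
 * Illustrative only (not part of this driver): a consumer typically arms
 * the CQ and then drains it until ib_poll_cq() returns fewer entries than
 * requested, along the lines of:
 *
 *      ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *      do {
 *              n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *              for (i = 0; i < n; ++i)
 *                      handle_wc(&wc[i]);
 *      } while (n > 0);
 *
 * where handle_wc() is a hypothetical consumer callback.  IB_CQ_SOLICITED
 * asks for an event only on the next solicited completion, while
 * IB_CQ_NEXT_COMP asks for one on any completion.
 */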

void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        u32 prod_index;
        int nfreed = 0;
        struct mlx4_cqe *cqe, *dest;
        u8 owner_bit;

        /*
         * First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /*
         * Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                if ((be32_to_cpu(cqe->my_qpn) & 0xffffff) == qpn) {
                        if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
                                mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
                        memcpy(dest, cqe, sizeof *cqe);
                        dest->owner_sr_opcode = owner_bit |
                                (dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
                }
        }
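
        /*
         * Note that when an entry is moved to a new slot above, the
         * destination keeps its own ownership bit: ownership depends on a
         * slot's position and pass around the ring, not on CQE contents.
         */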
        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /*
                 * Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx4_cq_set_ci(&cq->mcq);
        }
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
        spin_lock_irq(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}