/*
 * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation.  All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <rdma/rdma_vt.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct rvt_qpn_table *qpt,
					struct rvt_qpn_map *map, unsigned off,
					unsigned n, u16 qpt_mask)
{
	if (qpt_mask) {
		off++;
		if (((off & qpt_mask) >> 1) >= n)
			off = (off | qpt_mask) + 2;
	} else {
		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
	}
	return off;
}
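
/*
 * Posting parameters for each supported send work request opcode: the
 * size of its work request structure, the QP types that accept it, and
 * flags describing any atomic handling.
 */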
const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

};
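
/*
 * Allocate and install a zeroed QPN bitmap page for @map, freeing the
 * page again if another thread installed one first.
 */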
static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		  enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	u16 qpt_mask = dd->qpn_mask;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= RVT_QPN_MAX)
		qpn = 2;
	if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt_mask) + 2;
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues, qpt_mask);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * Return: the number of QPs still in use.
 */
unsigned qib_free_all_qps(struct rvt_dev_info *rdi)
{
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}
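
/* Clear the driver-private send DMA busy count as part of a QP reset. */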
void qib_notify_qp_reset(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	atomic_set(&priv->s_dma_busy, 0);
}
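
/*
 * Take the QP off the I/O wait list and release any held send resources
 * when the QP moves to the error state, provided the send engine is not
 * currently active (RVT_S_BUSY).
 */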
void qib_notify_error_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->iowait);
	}
	spin_unlock(&dev->rdi.pending_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (priv->s_tx) {
			qib_put_txreq(priv->s_tx);
			priv->s_tx = NULL;
		}
	}
}
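
/*
 * Convert an MTU in bytes to the corresponding IB_MTU_* enum value,
 * defaulting to IB_MTU_2048 for unrecognized sizes.
 */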
static int mtu_to_enum(u32 mtu)
{
	int enum_mtu;

	switch (mtu) {
	case 4096:
		enum_mtu = IB_MTU_4096;
		break;
	case 2048:
		enum_mtu = IB_MTU_2048;
		break;
	case 1024:
		enum_mtu = IB_MTU_1024;
		break;
	case 512:
		enum_mtu = IB_MTU_512;
		break;
	case 256:
		enum_mtu = IB_MTU_256;
		break;
	default:
		enum_mtu = IB_MTU_2048;
	}
	return enum_mtu;
}
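
/*
 * Work out the path MTU enum to use for this QP: the requested path_mtu
 * clamped to the port's IB MTU, or -EINVAL if the requested value is not
 * a valid MTU enum.
 */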
int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			   struct ib_qp_attr *attr)
{
	int mtu, pmtu, pidx = qp->port_num - 1;
	struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi);
	struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata,
					      verbs_dev);

	mtu = ib_mtu_enum_to_int(attr->path_mtu);
	if (mtu == -1)
		return -EINVAL;

	if (mtu > dd->pport[pidx].ibmtu)
		pmtu = mtu_to_enum(dd->pport[pidx].ibmtu);
	else
		pmtu = attr->path_mtu;
	return pmtu;
}
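
/* Translate a byte MTU into the IB_MTU_* path MTU enum. */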
int qib_mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu);
}
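
/* Report the QP's payload MTU in bytes, derived from the path MTU enum. */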
u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	return ib_mtu_enum_to_int(pmtu);
}
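
/*
 * Allocate the qib-private part of a QP: the send header buffer, the DMA
 * drain wait queue, the send work item and the I/O wait list entry.
 */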
void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	priv->owner = qp;

	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
	if (!priv->s_hdr) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	init_waitqueue_head(&priv->wait_dma);
	INIT_WORK(&priv->s_work, _qib_do_send);
	INIT_LIST_HEAD(&priv->iowait);

	return priv;
}
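
/* Free the qib-private QP state allocated by qib_qp_priv_alloc(). */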
void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	kfree(priv->s_hdr);
	kfree(priv);
}
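
/* Cancel, and wait for, any queued send work for this QP. */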
void qib_stop_send_queue(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_work);
}
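
/*
 * Wait for any in-flight send DMA to drain, then release the pending
 * tx request so the QP is fully quiesced.
 */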
void qib_quiesce_qp(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;

	wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
	if (priv->s_tx) {
		qib_put_txreq(priv->s_tx);
		priv->s_tx = NULL;
	}
}
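
/* Take the QP off the I/O wait list if it is queued there. */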
void qib_flush_qp_waiters(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);

	spin_lock(&dev->rdi.pending_lock);
	if (!list_empty(&priv->iowait))
		list_del_init(&priv->iowait);
	spin_unlock(&dev->rdi.pending_lock);
}

/**
 * qib_check_send_wqe - validate wr/wqe
 * @qp - The qp
 * @wqe - The built wqe
 *
 * Validate the wr/wqe.  This is called
 * prior to inserting the wqe into
 * the ring, but after the wqe has been
 * set up.
 *
 * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
 */
int qib_check_send_wqe(struct rvt_qp *qp,
		       struct rvt_swqe *wqe)
{
	struct rvt_ah *ah;
	int ret = 0;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		/* progress hint */
		ret = 1;
		break;
	default:
		break;
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

/**
 * qib_qp_iter_print - print information to seq_file
 * @s - the seq_file
 * @iter - the iterator
 */
void qib_qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct qib_qp_priv *priv = qp->priv;

	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&priv->s_dma_busy),
		   !list_empty(&priv->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr));
}
#endif