/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
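
/*
 * A QPN is just (bitmap page index * BITS_PER_PAGE) + bit offset; with
 * 4KB pages, for example, bit 5 of qpt->map[2] encodes
 * QPN 2 * 32768 + 5 = 65541, and vice versa.
 */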

/*
 * Convert the AETH credit code into the number of credits.
 */
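/*
 * The encoding is roughly geometric: even codes n >= 4 map to 2^(n/2)
 * credits and odd codes to 3 * 2^((n-3)/2); e.g. code 0x7 yields 12
 * credits and code 0x1E yields 32768.
 */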
static u32 credit_table[31] = {
	0,		/* 0 */
	1,		/* 1 */
	2,		/* 2 */
	3,		/* 3 */
	4,		/* 4 */
	6,		/* 5 */
	8,		/* 6 */
	12,		/* 7 */
	16,		/* 8 */
	24,		/* 9 */
	32,		/* A */
	48,		/* B */
	64,		/* C */
	96,		/* D */
	128,		/* E */
	192,		/* F */
	256,		/* 10 */
	384,		/* 11 */
	512,		/* 12 */
	768,		/* 13 */
	1024,		/* 14 */
	1536,		/* 15 */
	2048,		/* 16 */
	3072,		/* 17 */
	4096,		/* 18 */
	6144,		/* 19 */
	8192,		/* 1A */
	12288,		/* 1B */
	16384,		/* 1C */
	24576,		/* 1D */
	32768		/* 1E */
};

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *) page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}
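
/*
 * Allocate the next available QPN.  QPN 0 and QPN 1 are reserved for
 * the SMI and GSI QPs and map directly to bits 0 and 1 of the first
 * bitmap page; all other QPNs are found by scanning forward from
 * qpt->last, growing the bitmap table one page at a time as needed.
 */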
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;
	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
}

/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp;
	u32 n, qp_inuse = 0;

	spin_lock_irqsave(&qpt->lock, flags);
	for (n = 0; n < qpt->max; n++) {
		qp = qpt->table[n];
		qpt->table[n] = NULL;

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&qpt->lock, flags);

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	return qp_inuse;
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
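
/*
 * Callers typically pair this lookup with an atomic_dec(&qp->refcount)
 * once they are done with the QP, so that ipath_destroy_qp(), which
 * waits for the reference count to drop to zero, can make progress.
 */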

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_pkt_delay = 0;
	qp->s_draining = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
}

/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
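/*
 * When this returns nonzero, the caller is expected to generate the
 * IB_EVENT_QP_LAST_WQE_REACHED event once the s_lock has been dropped,
 * as ipath_modify_qp() does below.
 */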
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Don't allow invalid Path MTU values, or values greater than
	 * 2048 unless we are configured for a 4KB MTU.
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	     (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
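/*
 * Per the IB spec, the 8-bit credit/syndrome code occupies bits 31:24
 * of the AETH and the 24-bit MSN occupies bits 23:0 (IPATH_MSN_MASK).
 */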
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
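		/*
		 * The search settles on the largest code whose table
		 * entry does not exceed the credit count, e.g. 40
		 * available RWQEs round down to code 0xA (32 credits).
		 */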
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
				ipath_create_mmap_info(dev, s,
						       ibpd->uobject->context,
						       qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->timerwait))
			list_del_init(&qp->timerwait);
		if (!list_empty(&qp->piowait))
			list_del_init(&qp->piowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
		spin_unlock_irq(&qp->s_lock);
		/* Stop the sending tasklet */
		tasklet_kill(&qp->s_task);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	} else
		spin_unlock_irq(&qp->s_lock);

	ipath_free_qp(&dev->qp_table, qp);

	if (qp->s_tx) {
		atomic_dec(&qp->refcount);
		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
			kfree(qp->s_tx->txreq.map_addr);
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
		spin_unlock_irq(&dev->pending_lock);
		qp->s_tx = NULL;
	}

	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* All users are cleaned up; mark the QPN available. */
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	kfree(qp->r_ud_sg_list);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}
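
	/*
	 * For example, an AETH carrying MSN m with credit code 0xA
	 * advances the limit to (m + 32) & IPATH_MSN_MASK, allowing
	 * roughly 32 more sends past that sequence number.
	 */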

	/* Restart sending if it was blocked due to lack of credits. */
	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
	    qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		ipath_schedule_send(qp);
}