/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"
static void rvt_rc_timeout(struct timer_list *t);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: .01 */
	20,	/* 02: .02 */
	30,	/* 03: .03 */
	40,	/* 04: .04 */
	60,	/* 05: .06 */
	80,	/* 06: .08 */
	120,	/* 07: .12 */
	160,	/* 08: .16 */
	240,	/* 09: .24 */
	320,	/* 0A: .32 */
	480,	/* 0B: .48 */
	640,	/* 0C: .64 */
	960,	/* 0D: .96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
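
/*
 * Illustrative note: the 5-bit RNR timeout code (0x00-0x1f) indexes the
 * table above; the entry is the wait in microseconds, and the per-entry
 * comments give the equivalent in milliseconds, e.g. code 0x0E maps to
 * 1280 us = 1.28 ms.
 */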

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
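
/*
 * Illustrative use only: callers typically gate work on this table with a
 * single lookup, e.g.
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
 *		goto drop;
 *
 * so whether posting, processing, or flushing is legal follows directly
 * from the QP's current state.
 */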

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what we need for verbs; let them
	 * use our QPN table.  No need for two.  Go ahead and mark the bitmaps
	 * for those.  The reserved range must be *after* the range which
	 * verbs will pick from.
	 */
	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;
	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;
	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
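
/*
 * Layout note: each rvt_qpn_map entry holds one zeroed page used as a
 * bitmap covering RVT_BITS_PER_PAGE QPNs, so QPN n lives in page
 * n / RVT_BITS_PER_PAGE at bit n & RVT_BITS_PER_PAGE_MASK (see mk_qpn()
 * and rvt_free_qpn() below); pages are allocated lazily by get_map_page().
 */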

/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_array_node(rdi->qp_dev->qp_table_size,
				   sizeof(*rdi->qp_dev->qp_table),
				   GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK.  It gets re-assigned below.
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
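
/*
 * Allocation note for alloc_qpn() above: QPN 0/1 (SMI/GSI) are handed out
 * once per port and tracked in qpt->flags; every other QPN comes from
 * scanning the bitmap pages starting at qpt->last + qpt->incr, where
 * qpt->incr is qpn_inc << qos_shift, so the low-order QoS bits of the
 * returned QPN stay clear for the driver's QoS mapping (the WARN_ON above
 * catches any violation of that).
 */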

/**
 * rvt_clear_mr_refs - Drop MR references held by the QP
 * @qp: rvt qp data structure
 * @clr_sends: If the send side should be cleared or not
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_swqe(wqe);
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe: the send wqe
 * @lkey: the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp: the rvt_qp
 * @lkey: the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp: the qp
 * @lkey: the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp: the qp
 * @lkey: the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp().  The difference is that the reset path
 * holds the necessary locks to protect against concurrent
 * access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @rdi: rvt dev structure
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);

	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}
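
/*
 * Note on rvt_reset_qp() above: the three QP locks are dropped around
 * stop_send_queue()/quiesce_qp()/rvt_remove_qp() and re-acquired before
 * returning, presumably because those steps may sleep or wait for work
 * that itself takes the QP locks; callers still see all three locks held
 * on entry and exit, per the __must_hold annotations.
 */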

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their own
 * unique idea of what queue pair numbers mean. For instance there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_send_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge >
		    rdi->dparms.props.max_recv_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
		/* fall through */
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
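		/*
		 * Sizing note: sz is the per-WQE footprint (one rvt_swqe
		 * header plus one rvt_sge per allowed send SGE), and sqsize
		 * was computed above as max_send_wr + 1 + reserved_operations;
		 * the extra slot presumably keeps a completely full circular
		 * send queue distinguishable from an empty one, and the
		 * reserved slots back internal (reserved) operations.
		 */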
		swq = vzalloc_node(array_size(sz, sqsize), rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kcalloc_node(rvt_max_atomic(rdi),
					     sizeof(*qp->s_ack_queue),
					     GFP_KERNEL,
					     rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		timer_setup(&qp->s_timer, rvt_rc_timeout, 0);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer.  This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps.  busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good, now keep track of what types of
	 * opcodes can be processed on this QP. We do this by keeping track
	 * of what the 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	if (!qp->ip)
		vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (READ_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);
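
/*
 * Usage note: when rvt_error_qp() returns nonzero the caller is expected to
 * deliver the last-WQE event after dropping the QP locks, as rvt_modify_qp()
 * and rvt_qp_mr_clean() do in this file:
 *
 *	ev.device = qp->ibqp.device;
 *	ev.element.qp = &qp->ibqp;
 *	ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 */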

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}
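
/*
 * Lookup note: the two special QPNs (0 for SMI, 1 for GSI) live in the
 * per-port rvp->qp[0]/qp[1] slots, while every other QP is hashed by
 * hash_32(qp_num, qp_table_bits) onto an RCU-protected chain in
 * qp_dev->qp_table; rvt_remove_qp() above undoes both cases and drops the
 * table's reference.
 */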

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;
	int opa_ah;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;
	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;
switch ( new_state ) {
case IB_QPS_RESET :
if ( qp - > state ! = IB_QPS_RESET )
rvt_reset_qp ( rdi , qp , ibqp - > qp_type ) ;
break ;
case IB_QPS_RTR :
/* Allow event to re-trigger if QP set to RTR more than once */
qp - > r_flags & = ~ RVT_R_COMM_EST ;
qp - > state = new_state ;
break ;
case IB_QPS_SQD :
qp - > s_draining = qp - > s_last ! = qp - > s_cur ;
qp - > state = new_state ;
break ;
case IB_QPS_SQE :
if ( qp - > ibqp . qp_type = = IB_QPT_RC )
goto inval ;
qp - > state = new_state ;
break ;
case IB_QPS_ERR :
lastwqe = rvt_error_qp ( qp , IB_WC_WR_FLUSH_ERR ) ;
break ;
default :
qp - > state = new_state ;
break ;
}
if ( attr_mask & IB_QP_PKEY_INDEX )
qp - > s_pkey_index = attr - > pkey_index ;
if ( attr_mask & IB_QP_PORT )
qp - > port_num = attr - > port_num ;
if ( attr_mask & IB_QP_DEST_QPN )
qp - > remote_qpn = attr - > dest_qp_num ;
if ( attr_mask & IB_QP_SQ_PSN ) {
qp - > s_next_psn = attr - > sq_psn & rdi - > dparms . psn_modify_mask ;
qp - > s_psn = qp - > s_next_psn ;
qp - > s_sending_psn = qp - > s_next_psn ;
qp - > s_last_psn = qp - > s_next_psn - 1 ;
qp - > s_sending_hpsn = qp - > s_last_psn ;
}
if ( attr_mask & IB_QP_RQ_PSN )
qp - > r_psn = attr - > rq_psn & rdi - > dparms . psn_modify_mask ;
if ( attr_mask & IB_QP_ACCESS_FLAGS )
qp - > qp_access_flags = attr - > qp_access_flags ;
if ( attr_mask & IB_QP_AV ) {
2018-06-13 10:22:05 +03:00
rdma_replace_ah_attr ( & qp - > remote_ah_attr , & attr - > ah_attr ) ;
2017-04-29 14:41:28 -04:00
qp - > s_srate = rdma_ah_get_static_rate ( & attr - > ah_attr ) ;
2016-01-22 13:00:35 -08:00
qp - > srate_mbps = ib_rate_to_mbps ( qp - > s_srate ) ;
}
if ( attr_mask & IB_QP_ALT_PATH ) {
2018-06-13 10:22:05 +03:00
rdma_replace_ah_attr ( & qp - > alt_ah_attr , & attr - > alt_ah_attr ) ;
2016-01-22 13:00:35 -08:00
qp - > s_alt_pkey_index = attr - > alt_pkey_index ;
}
if ( attr_mask & IB_QP_PATH_MIG_STATE ) {
qp - > s_mig_state = attr - > path_mig_state ;
if ( mig ) {
qp - > remote_ah_attr = qp - > alt_ah_attr ;
2017-04-29 14:41:28 -04:00
qp - > port_num = rdma_ah_get_port_num ( & qp - > alt_ah_attr ) ;
qp - > s_pkey_index = qp - > s_alt_pkey_index ;
}
}
if ( attr_mask & IB_QP_PATH_MTU ) {
qp - > pmtu = rdi - > driver_f . mtu_from_qp ( rdi , qp , pmtu ) ;
qp - > log_pmtu = ilog2 ( qp - > pmtu ) ;
}
if ( attr_mask & IB_QP_RETRY_CNT ) {
qp - > s_retry_cnt = attr - > retry_cnt ;
qp - > s_retry = attr - > retry_cnt ;
}
if ( attr_mask & IB_QP_RNR_RETRY ) {
qp - > s_rnr_retry_cnt = attr - > rnr_retry ;
qp - > s_rnr_retry = attr - > rnr_retry ;
}
if ( attr_mask & IB_QP_MIN_RNR_TIMER )
qp - > r_min_rnr_timer = attr - > min_rnr_timer ;
if ( attr_mask & IB_QP_TIMEOUT ) {
qp - > timeout = attr - > timeout ;
qp - > timeout_jiffies = rvt_timeout_to_jiffies ( qp - > timeout ) ;
}
if ( attr_mask & IB_QP_QKEY )
qp - > qkey = attr - > qkey ;
if ( attr_mask & IB_QP_MAX_DEST_RD_ATOMIC )
qp - > r_max_rd_atomic = attr - > max_dest_rd_atomic ;
if ( attr_mask & IB_QP_MAX_QP_RD_ATOMIC )
qp - > s_max_rd_atomic = attr - > max_rd_atomic ;
if ( rdi - > driver_f . modify_qp )
rdi - > driver_f . modify_qp ( qp , attr , attr_mask , udata ) ;
spin_unlock ( & qp - > s_lock ) ;
spin_unlock ( & qp - > s_hlock ) ;
spin_unlock_irq ( & qp - > r_lock ) ;
if ( cur_state = = IB_QPS_RESET & & new_state = = IB_QPS_INIT )
rvt_insert_qp ( rdi , qp ) ;
if ( lastwqe ) {
ev . device = qp - > ibqp . device ;
ev . element . qp = & qp - > ibqp ;
ev . event = IB_EVENT_QP_LAST_WQE_REACHED ;
qp - > ibqp . event_handler ( & ev , qp - > ibqp . qp_context ) ;
}
if ( mig ) {
ev . device = qp - > ibqp . device ;
ev . element . qp = & qp - > ibqp ;
ev . event = IB_EVENT_PATH_MIG ;
qp - > ibqp . event_handler ( & ev , qp - > ibqp . qp_context ) ;
}
return 0 ;
inval :
spin_unlock ( & qp - > s_lock ) ;
spin_unlock ( & qp - > s_hlock ) ;
spin_unlock_irq ( & qp - > r_lock ) ;
return - EINVAL ;
}
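/*
 * Illustrative sketch (not part of the original source): a verbs consumer
 * drives the state machine validated above through ib_modify_qp().  The
 * attribute values below are examples only and error handling is omitted.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 *
 * Later transitions (INIT -> RTR -> RTS) pass IB_QP_PATH_MTU,
 * IB_QP_DEST_QPN, IB_QP_RQ_PSN, IB_QP_SQ_PSN and friends, which are the
 * masks checked and applied in rvt_modify_qp() above.
 */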
/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp ( struct ib_qp * ibqp )
{
struct rvt_qp * qp = ibqp_to_rvtqp ( ibqp ) ;
struct rvt_dev_info * rdi = ib_to_rvt ( ibqp - > device ) ;
spin_lock_irq ( & qp - > r_lock ) ;
spin_lock ( & qp - > s_hlock ) ;
spin_lock ( & qp - > s_lock ) ;
rvt_reset_qp ( rdi , qp , ibqp - > qp_type ) ;
spin_unlock ( & qp - > s_lock ) ;
spin_unlock ( & qp - > s_hlock ) ;
spin_unlock_irq ( & qp - > r_lock ) ;
wait_event ( qp - > wait , ! atomic_read ( & qp - > refcount ) ) ;
/* qpn is now available for use again */
rvt_free_qpn ( & rdi - > qp_dev - > qpn_table , qp - > ibqp . qp_num ) ;
spin_lock ( & rdi - > n_qps_lock ) ;
rdi - > n_qps_allocated - - ;
if ( qp - > ibqp . qp_type = = IB_QPT_RC ) {
rdi - > n_rc_qps - - ;
rdi - > busy_jiffies = rdi - > n_rc_qps / RC_QP_SCALING_INTERVAL ;
}
spin_unlock ( & rdi - > n_qps_lock ) ;
if ( qp - > ip )
kref_put ( & qp - > ip - > ref , rvt_release_mmap_info ) ;
else
vfree ( qp - > r_rq . wq ) ;
vfree ( qp - > s_wq ) ;
rdi - > driver_f . qp_priv_free ( rdi , qp ) ;
kfree ( qp - > s_ack_queue ) ;
rdma_destroy_ah_attr ( & qp - > remote_ah_attr ) ;
rdma_destroy_ah_attr ( & qp - > alt_ah_attr ) ;
kfree ( qp ) ;
return 0 ;
}
/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp ( struct ib_qp * ibqp , struct ib_qp_attr * attr ,
int attr_mask , struct ib_qp_init_attr * init_attr )
{
struct rvt_qp * qp = ibqp_to_rvtqp ( ibqp ) ;
struct rvt_dev_info * rdi = ib_to_rvt ( ibqp - > device ) ;
attr - > qp_state = qp - > state ;
attr - > cur_qp_state = attr - > qp_state ;
attr - > path_mtu = rdi - > driver_f . mtu_to_path_mtu ( qp - > pmtu ) ;
attr - > path_mig_state = qp - > s_mig_state ;
attr - > qkey = qp - > qkey ;
attr - > rq_psn = qp - > r_psn & rdi - > dparms . psn_mask ;
attr - > sq_psn = qp - > s_next_psn & rdi - > dparms . psn_mask ;
attr - > dest_qp_num = qp - > remote_qpn ;
attr - > qp_access_flags = qp - > qp_access_flags ;
attr - > cap . max_send_wr = qp - > s_size - 1 -
rdi - > dparms . reserved_operations ;
attr - > cap . max_recv_wr = qp - > ibqp . srq ? 0 : qp - > r_rq . size - 1 ;
attr - > cap . max_send_sge = qp - > s_max_sge ;
attr - > cap . max_recv_sge = qp - > r_rq . max_sge ;
attr - > cap . max_inline_data = 0 ;
attr - > ah_attr = qp - > remote_ah_attr ;
attr - > alt_ah_attr = qp - > alt_ah_attr ;
attr - > pkey_index = qp - > s_pkey_index ;
attr - > alt_pkey_index = qp - > s_alt_pkey_index ;
attr - > en_sqd_async_notify = 0 ;
attr - > sq_draining = qp - > s_draining ;
attr - > max_rd_atomic = qp - > s_max_rd_atomic ;
attr - > max_dest_rd_atomic = qp - > r_max_rd_atomic ;
attr - > min_rnr_timer = qp - > r_min_rnr_timer ;
attr - > port_num = qp - > port_num ;
attr - > timeout = qp - > timeout ;
attr - > retry_cnt = qp - > s_retry_cnt ;
attr - > rnr_retry = qp - > s_rnr_retry_cnt ;
attr - > alt_port_num =
rdma_ah_get_port_num ( & qp - > alt_ah_attr ) ;
attr - > alt_timeout = qp - > alt_timeout ;
init_attr - > event_handler = qp - > ibqp . event_handler ;
init_attr - > qp_context = qp - > ibqp . qp_context ;
init_attr - > send_cq = qp - > ibqp . send_cq ;
init_attr - > recv_cq = qp - > ibqp . recv_cq ;
init_attr - > srq = qp - > ibqp . srq ;
init_attr - > cap = attr - > cap ;
if ( qp - > s_flags & RVT_S_SIGNAL_REQ_WR )
init_attr - > sq_sig_type = IB_SIGNAL_REQ_WR ;
else
init_attr - > sq_sig_type = IB_SIGNAL_ALL_WR ;
init_attr - > qp_type = qp - > ibqp . qp_type ;
init_attr - > port_num = qp - > port_num ;
return 0 ;
}
/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv ( struct ib_qp * ibqp , struct ib_recv_wr * wr ,
struct ib_recv_wr * * bad_wr )
{
struct rvt_qp * qp = ibqp_to_rvtqp ( ibqp ) ;
struct rvt_rwq * wq = qp - > r_rq . wq ;
unsigned long flags ;
int qp_err_flush = ( ib_rvt_state_ops [ qp - > state ] & RVT_FLUSH_RECV ) & &
! qp - > ibqp . srq ;
/* Check that state is OK to post receive. */
if ( ! ( ib_rvt_state_ops [ qp - > state ] & RVT_POST_RECV_OK ) | | ! wq ) {
* bad_wr = wr ;
return - EINVAL ;
}
for ( ; wr ; wr = wr - > next ) {
struct rvt_rwqe * wqe ;
u32 next ;
int i ;
if ( ( unsigned ) wr - > num_sge > qp - > r_rq . max_sge ) {
* bad_wr = wr ;
return - EINVAL ;
}
spin_lock_irqsave ( & qp - > r_rq . lock , flags ) ;
next = wq - > head + 1 ;
if ( next > = qp - > r_rq . size )
next = 0 ;
if ( next = = wq - > tail ) {
spin_unlock_irqrestore ( & qp - > r_rq . lock , flags ) ;
* bad_wr = wr ;
return - ENOMEM ;
}
if ( unlikely ( qp_err_flush ) ) {
struct ib_wc wc ;
memset ( & wc , 0 , sizeof ( wc ) ) ;
wc . qp = & qp - > ibqp ;
wc . opcode = IB_WC_RECV ;
wc . wr_id = wr - > wr_id ;
wc . status = IB_WC_WR_FLUSH_ERR ;
rvt_cq_enter ( ibcq_to_rvtcq ( qp - > ibqp . recv_cq ) , & wc , 1 ) ;
} else {
wqe = rvt_get_rwqe_ptr ( & qp - > r_rq , wq - > head ) ;
wqe - > wr_id = wr - > wr_id ;
wqe - > num_sge = wr - > num_sge ;
for ( i = 0 ; i < wr - > num_sge ; i + + )
wqe - > sg_list [ i ] = wr - > sg_list [ i ] ;
/*
* Make sure queue entry is written
* before the head index .
*/
smp_wmb ( ) ;
wq - > head = next ;
}
spin_unlock_irqrestore ( & qp - > r_rq . lock , flags ) ;
}
return 0 ;
}
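/*
 * Illustrative sketch (not part of the original source): how a caller
 * posts a single receive buffer through the verbs layer, which ends up
 * in rvt_post_recv() above.  The buffer, length and lkey are assumptions.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = buf_len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = (uintptr_t)buf,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *
 *	ret = ib_post_recv(qp, &wr, &bad_wr);
 */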
/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp: the qp
 * @post_parms: the post send table for the driver
 * @wr: the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr.  Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation (
struct rvt_qp * qp ,
const struct rvt_operation_params * post_parms ,
const struct ib_send_wr * wr )
{
int len ;
if ( wr - > opcode > = RVT_OPERATION_MAX | | ! post_parms [ wr - > opcode ] . length )
return - EINVAL ;
if ( ! ( post_parms [ wr - > opcode ] . qpt_support & BIT ( qp - > ibqp . qp_type ) ) )
return - EINVAL ;
if ( ( post_parms [ wr - > opcode ] . flags & RVT_OPERATION_PRIV ) & &
ibpd_to_rvtpd ( qp - > ibqp . pd ) - > user )
return - EINVAL ;
if ( post_parms [ wr - > opcode ] . flags & RVT_OPERATION_ATOMIC_SGE & &
( wr - > num_sge = = 0 | |
wr - > sg_list [ 0 ] . length < sizeof ( u64 ) | |
wr - > sg_list [ 0 ] . addr & ( sizeof ( u64 ) - 1 ) ) )
return - EINVAL ;
if ( post_parms [ wr - > opcode ] . flags & RVT_OPERATION_ATOMIC & &
! qp - > s_max_rd_atomic )
return - EINVAL ;
len = post_parms [ wr - > opcode ] . length ;
/* UD specific */
if ( qp - > ibqp . qp_type ! = IB_QPT_UC & &
qp - > ibqp . qp_type ! = IB_QPT_RC ) {
if ( qp - > ibqp . pd ! = ud_wr ( wr ) - > ah - > pd )
return - EINVAL ;
len = sizeof ( struct ib_ud_wr ) ;
}
return len ;
}
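/*
 * For reference, a driver describes its supported operations in a table
 * indexed by opcode; the entries below are an illustrative sketch modeled
 * on the hfi1/qib tables, not a definitive list:
 *
 *	static const struct rvt_operation_params example_post_parms[RVT_OPERATION_MAX] = {
 *	[IB_WR_RDMA_WRITE] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
 *	},
 *	[IB_WR_ATOMIC_CMP_AND_SWP] = {
 *		.length = sizeof(struct ib_atomic_wr),
 *		.qpt_support = BIT(IB_QPT_RC),
 *		.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
 *	},
 *	};
 *
 * rvt_qp_valid_operation() rejects any work request whose opcode is not
 * described here or whose flags conflict with the QP type.
 */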
/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp: the qp
 * @rdi: the rdmavt device
 * @reserved_op: is reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non-reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail (
struct rvt_qp * qp ,
struct rvt_dev_info * rdi ,
bool reserved_op )
{
u32 slast ;
u32 avail ;
u32 reserved_used ;
/* see rvt_qp_wqe_unreserve() */
smp_mb__before_atomic ( ) ;
reserved_used = atomic_read ( & qp - > s_reserved_used ) ;
if ( unlikely ( reserved_op ) ) {
/* see rvt_qp_wqe_unreserve() */
smp_mb__before_atomic ( ) ;
if ( reserved_used > = rdi - > dparms . reserved_operations )
return - ENOMEM ;
return 0 ;
}
/* non-reserved operations */
if ( likely ( qp - > s_avail ) )
return 0 ;
slast = READ_ONCE ( qp - > s_last ) ;
if ( qp - > s_head > = slast )
avail = qp - > s_size - ( qp - > s_head - slast ) ;
else
avail = slast - qp - > s_head ;
/* see rvt_qp_wqe_unreserve() */
smp_mb__before_atomic ( ) ;
reserved_used = atomic_read ( & qp - > s_reserved_used ) ;
avail = avail - 1 -
( rdi - > dparms . reserved_operations - reserved_used ) ;
/* ensure we don't assign a negative s_avail */
if ( ( s32 ) avail < = 0 )
return - ENOMEM ;
qp - > s_avail = avail ;
if ( WARN_ON ( qp - > s_avail >
( qp - > s_size - 1 - rdi - > dparms . reserved_operations ) ) )
rvt_pr_err ( rdi ,
" More avail entries than QP RB size. \n QP: %u, size: %u, avail: %u \n head: %u, tail: %u, cur: %u, acked: %u, last: %u " ,
qp - > ibqp . qp_num , qp - > s_size , qp - > s_avail ,
qp - > s_head , qp - > s_tail , qp - > s_cur ,
qp - > s_acked , qp - > s_last ) ;
return 0 ;
}
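/*
 * Worked example of the ring arithmetic above (values are illustrative):
 * with s_size = 32, s_head = 10, s_last = 4, reserved_operations = 1 and
 * no reserved entries in use, avail = 32 - (10 - 4) = 26 and qp->s_avail
 * becomes 26 - 1 - (1 - 0) = 24; one slot is always kept free to
 * distinguish a full ring from an empty one and one is held back for
 * reserved operations.
 */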
/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 * @call_send: request the caller to invoke the send engine directly
 * rather than scheduling it
 */
static int rvt_post_one_wr ( struct rvt_qp * qp ,
const struct ib_send_wr * wr ,
int * call_send )
{
struct rvt_swqe * wqe ;
u32 next ;
int i ;
int j ;
int acc ;
struct rvt_lkey_table * rkt ;
struct rvt_pd * pd ;
struct rvt_dev_info * rdi = ib_to_rvt ( qp - > ibqp . device ) ;
u8 log_pmtu ;
int ret ;
size_t cplen ;
bool reserved_op ;
int local_ops_delayed = 0 ;
BUILD_BUG_ON ( IB_QPT_MAX > = ( sizeof ( u32 ) * BITS_PER_BYTE ) ) ;
/* IB spec says that num_sge == 0 is OK. */
if ( unlikely ( wr - > num_sge > qp - > s_max_sge ) )
return - EINVAL ;
ret = rvt_qp_valid_operation ( qp , rdi - > post_parms , wr ) ;
if ( ret < 0 )
return ret ;
cplen = ret ;
/*
 * Local operations include fast register and local invalidate.
 * Fast register needs to be processed immediately because the
 * registered lkey may be used by following work requests and the
 * lkey needs to be valid at the time those requests are posted.
 * Local invalidate can be processed immediately if fencing is
 * not required and no previous local invalidate ops are pending.
 * Signaled local operations that have been processed immediately
 * need to have requests with "completion only" flags set posted
 * to the send queue in order to generate completions.
 */
if ( ( rdi - > post_parms [ wr - > opcode ] . flags & RVT_OPERATION_LOCAL ) ) {
switch ( wr - > opcode ) {
case IB_WR_REG_MR :
ret = rvt_fast_reg_mr ( qp ,
reg_wr ( wr ) - > mr ,
reg_wr ( wr ) - > key ,
reg_wr ( wr ) - > access ) ;
if ( ret | | ! ( wr - > send_flags & IB_SEND_SIGNALED ) )
return ret ;
break ;
case IB_WR_LOCAL_INV :
if ( ( wr - > send_flags & IB_SEND_FENCE ) | |
atomic_read ( & qp - > local_ops_pending ) ) {
local_ops_delayed = 1 ;
} else {
ret = rvt_invalidate_rkey (
qp , wr - > ex . invalidate_rkey ) ;
if ( ret | | ! ( wr - > send_flags & IB_SEND_SIGNALED ) )
return ret ;
}
break ;
default :
return - EINVAL ;
}
}
reserved_op = rdi - > post_parms [ wr - > opcode ] . flags &
RVT_OPERATION_USE_RESERVE ;
/* check for avail */
ret = rvt_qp_is_avail ( qp , rdi , reserved_op ) ;
if ( ret )
return ret ;
next = qp - > s_head + 1 ;
if ( next > = qp - > s_size )
next = 0 ;
rkt = & rdi - > lkey_table ;
pd = ibpd_to_rvtpd ( qp - > ibqp . pd ) ;
wqe = rvt_get_swqe_ptr ( qp , qp - > s_head ) ;
/* cplen has length from above */
memcpy ( & wqe - > wr , wr , cplen ) ;
wqe - > length = 0 ;
j = 0 ;
if ( wr - > num_sge ) {
struct rvt_sge * last_sge = NULL ;
acc = wr - > opcode > = IB_WR_RDMA_READ ?
IB_ACCESS_LOCAL_WRITE : 0 ;
for ( i = 0 ; i < wr - > num_sge ; i + + ) {
u32 length = wr - > sg_list [ i ] . length ;
if ( length = = 0 )
continue ;
ret = rvt_lkey_ok ( rkt , pd , & wqe - > sg_list [ j ] , last_sge ,
& wr - > sg_list [ i ] , acc ) ;
if ( unlikely ( ret < 0 ) )
goto bail_inval_free ;
wqe - > length + = length ;
if ( ret )
last_sge = & wqe - > sg_list [ j ] ;
j + = ret ;
}
wqe - > wr . num_sge = j ;
}
/* general part of wqe valid - allow for driver checks */
if ( rdi - > driver_f . check_send_wqe ) {
ret = rdi - > driver_f . check_send_wqe ( qp , wqe ) ;
if ( ret < 0 )
goto bail_inval_free ;
if ( ret )
* call_send = ret ;
}
log_pmtu = qp - > log_pmtu ;
if ( qp - > ibqp . qp_type ! = IB_QPT_UC & &
qp - > ibqp . qp_type ! = IB_QPT_RC ) {
struct rvt_ah * ah = ibah_to_rvtah ( wqe - > ud_wr . ah ) ;
log_pmtu = ah - > log_pmtu ;
atomic_inc ( & ibah_to_rvtah ( ud_wr ( wr ) - > ah ) - > refcount ) ;
}
if ( rdi - > post_parms [ wr - > opcode ] . flags & RVT_OPERATION_LOCAL ) {
if ( local_ops_delayed )
atomic_inc ( & qp - > local_ops_pending ) ;
else
wqe - > wr . send_flags | = RVT_SEND_COMPLETION_ONLY ;
wqe - > ssn = 0 ;
wqe - > psn = 0 ;
wqe - > lpsn = 0 ;
} else {
wqe - > ssn = qp - > s_ssn + + ;
wqe - > psn = qp - > s_next_psn ;
wqe - > lpsn = wqe - > psn +
( wqe - > length ?
( ( wqe - > length - 1 ) > > log_pmtu ) :
0 ) ;
qp - > s_next_psn = wqe - > lpsn + 1 ;
}
if ( unlikely ( reserved_op ) ) {
wqe - > wr . send_flags | = RVT_SEND_RESERVE_USED ;
rvt_qp_wqe_reserve ( qp , wqe ) ;
} else {
wqe - > wr . send_flags & = ~ RVT_SEND_RESERVE_USED ;
qp - > s_avail - - ;
}
trace_rvt_post_one_wr ( qp , wqe , wr - > num_sge ) ;
smp_wmb ( ) ; /* see request builders */
qp - > s_head = next ;
return 0 ;
bail_inval_free :
/* release mr holds */
while ( j ) {
struct rvt_sge * sge = & wqe - > sg_list [ - - j ] ;
rvt_put_mr ( sge - > mr ) ;
}
return ret ;
}
/**
 * rvt_post_send - post a send on a QP
 * @ibqp: the QP to post the send on
 * @wr: the list of work requests to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_send ( struct ib_qp * ibqp , struct ib_send_wr * wr ,
struct ib_send_wr * * bad_wr )
{
struct rvt_qp * qp = ibqp_to_rvtqp ( ibqp ) ;
struct rvt_dev_info * rdi = ib_to_rvt ( ibqp - > device ) ;
unsigned long flags = 0 ;
int call_send ;
unsigned nreq = 0 ;
int err = 0 ;
spin_lock_irqsave ( & qp - > s_hlock , flags ) ;
/*
 * Ensure QP state is such that we can send.  If not bail out early,
 * there is no need to do this every time we post a send.
 */
if ( unlikely ( ! ( ib_rvt_state_ops [ qp - > state ] & RVT_POST_SEND_OK ) ) ) {
spin_unlock_irqrestore ( & qp - > s_hlock , flags ) ;
return - EINVAL ;
}
/*
 * If the send queue is empty and we only have a single WR, then just go
 * ahead and kick the send engine into gear.  Otherwise we will always
 * just schedule the send to happen later.
 */
call_send = qp - > s_head = = READ_ONCE ( qp - > s_last ) & & ! wr - > next ;
for ( ; wr ; wr = wr - > next ) {
err = rvt_post_one_wr ( qp , wr , & call_send ) ;
if ( unlikely ( err ) ) {
* bad_wr = wr ;
goto bail ;
}
nreq + + ;
}
bail :
spin_unlock_irqrestore ( & qp - > s_hlock , flags ) ;
if ( nreq ) {
if ( call_send )
rdi - > driver_f . do_send ( qp ) ;
else
rdi - > driver_f . schedule_send_no_lock ( qp ) ;
}
return err ;
}
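/*
 * Illustrative sketch (not part of the original source): posting a single
 * signaled SEND.  With an empty send queue and only one WR chained, the
 * call_send fast path above invokes the driver's do_send() directly
 * instead of scheduling it.  The values are assumptions.
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */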
/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv ( struct ib_srq * ibsrq , struct ib_recv_wr * wr ,
struct ib_recv_wr * * bad_wr )
{
struct rvt_srq * srq = ibsrq_to_rvtsrq ( ibsrq ) ;
struct rvt_rwq * wq ;
unsigned long flags ;
for ( ; wr ; wr = wr - > next ) {
struct rvt_rwqe * wqe ;
u32 next ;
int i ;
if ( ( unsigned ) wr - > num_sge > srq - > rq . max_sge ) {
* bad_wr = wr ;
return - EINVAL ;
}
spin_lock_irqsave ( & srq - > rq . lock , flags ) ;
wq = srq - > rq . wq ;
next = wq - > head + 1 ;
if ( next > = srq - > rq . size )
next = 0 ;
if ( next = = wq - > tail ) {
spin_unlock_irqrestore ( & srq - > rq . lock , flags ) ;
* bad_wr = wr ;
return - ENOMEM ;
}
wqe = rvt_get_rwqe_ptr ( & srq - > rq , wq - > head ) ;
wqe - > wr_id = wr - > wr_id ;
wqe - > num_sge = wr - > num_sge ;
for ( i = 0 ; i < wr - > num_sge ; i + + )
wqe - > sg_list [ i ] = wr - > sg_list [ i ] ;
/* Make sure queue entry is written before the head index. */
smp_wmb ( ) ;
wq - > head = next ;
spin_unlock_irqrestore ( & srq - > rq . lock , flags ) ;
}
return 0 ;
}
/*
* Validate a RWQE and fill in the SGE state .
* Return 1 if OK .
*/
static int init_sge ( struct rvt_qp * qp , struct rvt_rwqe * wqe )
{
int i , j , ret ;
struct ib_wc wc ;
struct rvt_lkey_table * rkt ;
struct rvt_pd * pd ;
struct rvt_sge_state * ss ;
struct rvt_dev_info * rdi = ib_to_rvt ( qp - > ibqp . device ) ;
rkt = & rdi - > lkey_table ;
pd = ibpd_to_rvtpd ( qp - > ibqp . srq ? qp - > ibqp . srq - > pd : qp - > ibqp . pd ) ;
ss = & qp - > r_sge ;
ss - > sg_list = qp - > r_sg_list ;
qp - > r_len = 0 ;
for ( i = j = 0 ; i < wqe - > num_sge ; i + + ) {
if ( wqe - > sg_list [ i ] . length = = 0 )
continue ;
/* Check LKEY */
ret = rvt_lkey_ok ( rkt , pd , j ? & ss - > sg_list [ j - 1 ] : & ss - > sge ,
NULL , & wqe - > sg_list [ i ] ,
IB_ACCESS_LOCAL_WRITE ) ;
if ( unlikely ( ret < = 0 ) )
goto bad_lkey ;
qp - > r_len + = wqe - > sg_list [ i ] . length ;
j + + ;
}
ss - > num_sge = j ;
ss - > total_len = qp - > r_len ;
return 1 ;
bad_lkey :
while ( j ) {
struct rvt_sge * sge = - - j ? & ss - > sg_list [ j - 1 ] : & ss - > sge ;
rvt_put_mr ( sge - > mr ) ;
}
ss - > num_sge = 0 ;
memset ( & wc , 0 , sizeof ( wc ) ) ;
wc . wr_id = wqe - > wr_id ;
wc . status = IB_WC_LOC_PROT_ERR ;
wc . opcode = IB_WC_RECV ;
wc . qp = & qp - > ibqp ;
/* Signal solicited completion event. */
rvt_cq_enter ( ibcq_to_rvtcq ( qp - > ibqp . recv_cq ) , & wc , 1 ) ;
return 0 ;
}
/**
* rvt_get_rwqe - copy the next RWQE into the QP ' s RWQE
* @ qp : the QP
* @ wr_id_only : update qp - > r_wr_id only , not qp - > r_sge
*
* Return - 1 if there is a local error , 0 if no RWQE is available ,
* otherwise return 1.
*
* Can be called from interrupt level .
*/
int rvt_get_rwqe ( struct rvt_qp * qp , bool wr_id_only )
{
unsigned long flags ;
struct rvt_rq * rq ;
struct rvt_rwq * wq ;
struct rvt_srq * srq ;
struct rvt_rwqe * wqe ;
void ( * handler ) ( struct ib_event * , void * ) ;
u32 tail ;
int ret ;
if ( qp - > ibqp . srq ) {
srq = ibsrq_to_rvtsrq ( qp - > ibqp . srq ) ;
handler = srq - > ibsrq . event_handler ;
rq = & srq - > rq ;
} else {
srq = NULL ;
handler = NULL ;
rq = & qp - > r_rq ;
}
spin_lock_irqsave ( & rq - > lock , flags ) ;
if ( ! ( ib_rvt_state_ops [ qp - > state ] & RVT_PROCESS_RECV_OK ) ) {
ret = 0 ;
goto unlock ;
}
wq = rq - > wq ;
tail = wq - > tail ;
/* Validate tail before using it since it is user writable. */
if ( tail > = rq - > size )
tail = 0 ;
if ( unlikely ( tail = = wq - > head ) ) {
ret = 0 ;
goto unlock ;
}
/* Make sure entry is read after head index is read. */
smp_rmb ( ) ;
wqe = rvt_get_rwqe_ptr ( rq , tail ) ;
/*
* Even though we update the tail index in memory , the verbs
* consumer is not supposed to post more entries until a
* completion is generated .
*/
if ( + + tail > = rq - > size )
tail = 0 ;
wq - > tail = tail ;
if ( ! wr_id_only & & ! init_sge ( qp , wqe ) ) {
ret = - 1 ;
goto unlock ;
}
qp - > r_wr_id = wqe - > wr_id ;
ret = 1 ;
set_bit ( RVT_R_WRID_VALID , & qp - > r_aflags ) ;
if ( handler ) {
u32 n ;
/*
* Validate head pointer value and compute
* the number of remaining WQEs .
*/
n = wq - > head ;
if ( n > = rq - > size )
n = 0 ;
if ( n < tail )
n + = rq - > size - tail ;
else
n - = tail ;
if ( n < srq - > limit ) {
struct ib_event ev ;
srq - > limit = 0 ;
spin_unlock_irqrestore ( & rq - > lock , flags ) ;
ev . device = qp - > ibqp . device ;
ev . element . srq = qp - > ibqp . srq ;
ev . event = IB_EVENT_SRQ_LIMIT_REACHED ;
handler ( & ev , srq - > ibsrq . srq_context ) ;
goto bail ;
}
}
unlock :
spin_unlock_irqrestore ( & rq - > lock , flags ) ;
bail :
return ret ;
}
EXPORT_SYMBOL ( rvt_get_rwqe ) ;
/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est ( struct rvt_qp * qp )
{
qp - > r_flags | = RVT_R_COMM_EST ;
if ( qp - > ibqp . event_handler ) {
struct ib_event ev ;
ev . device = qp - > ibqp . device ;
ev . element . qp = & qp - > ibqp ;
ev . event = IB_EVENT_COMM_EST ;
qp - > ibqp . event_handler ( & ev , qp - > ibqp . qp_context ) ;
}
}
EXPORT_SYMBOL ( rvt_comm_est ) ;
void rvt_rc_error ( struct rvt_qp * qp , enum ib_wc_status err )
{
unsigned long flags ;
int lastwqe ;
spin_lock_irqsave ( & qp - > s_lock , flags ) ;
lastwqe = rvt_error_qp ( qp , err ) ;
spin_unlock_irqrestore ( & qp - > s_lock , flags ) ;
if ( lastwqe ) {
struct ib_event ev ;
ev . device = qp - > ibqp . device ;
ev . element . qp = & qp - > ibqp ;
ev . event = IB_EVENT_QP_LAST_WQE_REACHED ;
qp - > ibqp . event_handler ( & ev , qp - > ibqp . qp_context ) ;
}
}
EXPORT_SYMBOL ( rvt_rc_error ) ;
/*
 * rvt_rnr_tbl_to_usec - convert an index into ib_rvt_rnr_table to usec
 * @index: the index
 *
 * Return: the RNR timeout in usec for the given index
 */
unsigned long rvt_rnr_tbl_to_usec ( u32 index )
{
return ib_rvt_rnr_table [ ( index & IB_AETH_CREDIT_MASK ) ] ;
}
EXPORT_SYMBOL ( rvt_rnr_tbl_to_usec ) ;
static inline unsigned long rvt_aeth_to_usec ( u32 aeth )
{
return ib_rvt_rnr_table [ ( aeth > > IB_AETH_CREDIT_SHIFT ) &
IB_AETH_CREDIT_MASK ] ;
}
/*
* rvt_add_retry_timer - add / start a retry timer
* @ qp - the QP
* add a retry timer on the QP
*/
void rvt_add_retry_timer ( struct rvt_qp * qp )
{
struct ib_qp * ibqp = & qp - > ibqp ;
struct rvt_dev_info * rdi = ib_to_rvt ( ibqp - > device ) ;
lockdep_assert_held ( & qp - > s_lock ) ;
qp - > s_flags | = RVT_S_TIMER ;
/* 4.096 usec. * (1 << qp->timeout) */
qp - > s_timer . expires = jiffies + qp - > timeout_jiffies +
rdi - > busy_jiffies ;
add_timer ( & qp - > s_timer ) ;
}
EXPORT_SYMBOL ( rvt_add_retry_timer ) ;
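/*
 * Worked example for the expiry computed above: the IB timeout field
 * encodes 4.096 usec * (1 << timeout), so qp->timeout = 14 gives about
 * 4.096 usec * 16384 ~= 67 msec before rvt_rc_timeout() fires, plus
 * rdi->busy_jiffies when many RC QPs are active.
 */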
/**
* rvt_add_rnr_timer - add / start an rnr timer
* @ qp - the QP
* @ aeth - aeth of RNR timeout , simulated aeth for loopback
* add an rnr timer on the QP
*/
void rvt_add_rnr_timer ( struct rvt_qp * qp , u32 aeth )
{
u32 to ;
lockdep_assert_held ( & qp - > s_lock ) ;
qp - > s_flags | = RVT_S_WAIT_RNR ;
to = rvt_aeth_to_usec ( aeth ) ;
trace_rvt_rnrnak_add ( qp , to ) ;
hrtimer_start ( & qp - > s_rnr_timer ,
ns_to_ktime ( 1000 * to ) , HRTIMER_MODE_REL_PINNED ) ;
}
EXPORT_SYMBOL ( rvt_add_rnr_timer ) ;
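/*
 * Worked example for rvt_aeth_to_usec(): the RNR timeout code is pulled
 * out of the AETH with IB_AETH_CREDIT_SHIFT/IB_AETH_CREDIT_MASK; a code
 * of 0x0c indexes ib_rvt_rnr_table and yields 640 usec (0.64 msec), which
 * is the interval programmed into the hrtimer above.
 */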
/**
* rvt_stop_rc_timers - stop all timers
* @ qp - the QP
* stop any pending timers
*/
void rvt_stop_rc_timers ( struct rvt_qp * qp )
{
lockdep_assert_held ( & qp - > s_lock ) ;
/* Remove QP from all timers */
if ( qp - > s_flags & ( RVT_S_TIMER | RVT_S_WAIT_RNR ) ) {
qp - > s_flags & = ~ ( RVT_S_TIMER | RVT_S_WAIT_RNR ) ;
del_timer ( & qp - > s_timer ) ;
hrtimer_try_to_cancel ( & qp - > s_rnr_timer ) ;
}
}
EXPORT_SYMBOL ( rvt_stop_rc_timers ) ;
/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer, if one is pending
 */
static void rvt_stop_rnr_timer ( struct rvt_qp * qp )
{
lockdep_assert_held ( & qp - > s_lock ) ;
/* Remove QP from rnr timer */
if ( qp - > s_flags & RVT_S_WAIT_RNR ) {
qp - > s_flags & = ~ RVT_S_WAIT_RNR ;
trace_rvt_rnrnak_stop ( qp , 0 ) ;
}
}
/**
* rvt_del_timers_sync - wait for any timeout routines to exit
* @ qp - the QP
*/
void rvt_del_timers_sync ( struct rvt_qp * qp )
{
del_timer_sync ( & qp - > s_timer ) ;
hrtimer_cancel ( & qp - > s_rnr_timer ) ;
}
EXPORT_SYMBOL ( rvt_del_timers_sync ) ;
/**
 * rvt_rc_timeout - handle a timeout waiting for a response
 * @t: the timer that fired
 *
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout ( struct timer_list * t )
{
struct rvt_qp * qp = from_timer ( qp , t , s_timer ) ;
struct rvt_dev_info * rdi = ib_to_rvt ( qp - > ibqp . device ) ;
unsigned long flags ;
spin_lock_irqsave ( & qp - > r_lock , flags ) ;
spin_lock ( & qp - > s_lock ) ;
if ( qp - > s_flags & RVT_S_TIMER ) {
struct rvt_ibport * rvp = rdi - > ports [ qp - > port_num - 1 ] ;
qp - > s_flags & = ~ RVT_S_TIMER ;
rvp - > n_rc_timeouts + + ;
del_timer ( & qp - > s_timer ) ;
trace_rvt_rc_timeout ( qp , qp - > s_last_psn + 1 ) ;
if ( rdi - > driver_f . notify_restart_rc )
rdi - > driver_f . notify_restart_rc ( qp ,
qp - > s_last_psn + 1 ,
1 ) ;
rdi - > driver_f . schedule_send ( qp ) ;
}
spin_unlock ( & qp - > s_lock ) ;
spin_unlock_irqrestore ( & qp - > r_lock , flags ) ;
}
/*
* This is called from s_timer for RNR timeouts .
*/
enum hrtimer_restart rvt_rc_rnr_retry ( struct hrtimer * t )
{
struct rvt_qp * qp = container_of ( t , struct rvt_qp , s_rnr_timer ) ;
struct rvt_dev_info * rdi = ib_to_rvt ( qp - > ibqp . device ) ;
unsigned long flags ;
spin_lock_irqsave ( & qp - > s_lock , flags ) ;
rvt_stop_rnr_timer ( qp ) ;
trace_rvt_rnrnak_timeout ( qp , 0 ) ;
rdi - > driver_f . schedule_send ( qp ) ;
spin_unlock_irqrestore ( & qp - > s_lock , flags ) ;
return HRTIMER_NORESTART ;
}
EXPORT_SYMBOL ( rvt_rc_rnr_retry ) ;
/**
 * rvt_qp_iter_init - initialize an iterator for QP iteration
 * @rdi: rvt devinfo
 * @v: u64 value
 * @cb: user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb.  An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter * rvt_qp_iter_init ( struct rvt_dev_info * rdi ,
u64 v ,
void ( * cb ) ( struct rvt_qp * qp , u64 v ) )
{
struct rvt_qp_iter * i ;
i = kzalloc ( sizeof ( * i ) , GFP_KERNEL ) ;
if ( ! i )
return NULL ;
i - > rdi = rdi ;
/* number of special QPs (SMI/GSI) for device */
i - > specials = rdi - > ibdev . phys_port_cnt * 2 ;
i - > v = v ;
i - > cb = cb ;
return i ;
}
EXPORT_SYMBOL ( rvt_qp_iter_init ) ;
/**
* rvt_qp_iter_next - return the next QP in iter
* @ iter - the iterator
*
* Fine grained QP iterator suitable for use
* with debugfs seq_file mechanisms .
*
* Updates iter - > qp with the current QP when the return
* value is 0.
*
* Return : 0 - iter - > qp is valid 1 - no more QPs
*/
int rvt_qp_iter_next ( struct rvt_qp_iter * iter )
__must_hold ( RCU )
{
int n = iter - > n ;
int ret = 1 ;
struct rvt_qp * pqp = iter - > qp ;
struct rvt_qp * qp ;
struct rvt_dev_info * rdi = iter - > rdi ;
/*
* The approach is to consider the special qps
* as additional table entries before the
* real hash table . Since the qp code sets
* the qp - > next hash link to NULL , this works just fine .
*
* iter - > specials is 2 * # ports
*
* n = 0. . iter - > specials is the special qp indices
*
* n = iter - > specials . . rdi - > qp_dev - > qp_table_size + iter - > specials are
* the potential hash bucket entries
*
*/
for ( ; n < rdi - > qp_dev - > qp_table_size + iter - > specials ; n + + ) {
if ( pqp ) {
qp = rcu_dereference ( pqp - > next ) ;
} else {
if ( n < iter - > specials ) {
struct rvt_ibport * rvp ;
int pidx ;
pidx = n % rdi - > ibdev . phys_port_cnt ;
rvp = rdi - > ports [ pidx ] ;
qp = rcu_dereference ( rvp - > qp [ n & 1 ] ) ;
} else {
qp = rcu_dereference (
rdi - > qp_dev - > qp_table [
( n - iter - > specials ) ] ) ;
}
}
pqp = qp ;
if ( qp ) {
iter - > qp = qp ;
iter - > n = n ;
return 0 ;
}
}
return ret ;
}
EXPORT_SYMBOL ( rvt_qp_iter_next ) ;
/**
* rvt_qp_iter - iterate all QPs
* @ rdi - rvt devinfo
* @ v - a 64 bit value
* @ cb - a callback
*
* This provides a way for iterating all QPs .
*
* The @ cb is a user defined callback and @ v is a 64
* bit value passed to and relevant for processing in the
* cb . An example use case would be to alter QP processing
* based on criteria not part of the rvt_qp .
*
* The code has an internal iterator to simplify
* non seq_file use cases .
*/
void rvt_qp_iter ( struct rvt_dev_info * rdi ,
u64 v ,
void ( * cb ) ( struct rvt_qp * qp , u64 v ) )
{
int ret ;
struct rvt_qp_iter i = {
. rdi = rdi ,
. specials = rdi - > ibdev . phys_port_cnt * 2 ,
. v = v ,
. cb = cb
} ;
rcu_read_lock ( ) ;
do {
ret = rvt_qp_iter_next ( & i ) ;
if ( ! ret ) {
rvt_get_qp ( i . qp ) ;
rcu_read_unlock ( ) ;
i . cb ( i . qp , i . v ) ;
rcu_read_lock ( ) ;
rvt_put_qp ( i . qp ) ;
}
} while ( ! ret ) ;
rcu_read_unlock ( ) ;
}
EXPORT_SYMBOL ( rvt_qp_iter ) ;
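/*
 * Illustrative sketch (not part of the original source): a driver-defined
 * callback used with rvt_qp_iter() to move every RC QP into the error
 * state, for example after a fatal device error.  The helper name is
 * hypothetical.
 *
 *	static void example_flush_qp(struct rvt_qp *qp, u64 v)
 *	{
 *		if (qp->ibqp.qp_type == IB_QPT_RC)
 *			rvt_rc_error(qp, IB_WC_WR_FLUSH_ERR);
 *	}
 *
 *	rvt_qp_iter(rdi, 0, example_flush_qp);
 */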