/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/uverbs_ioctl.h>
# include "mthca_dev.h"
# include "mthca_cmd.h"
# include "mthca_memfree.h"
# include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};
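
/*
 * Editorial note (worked example, not in the original source): with
 * wqe_shift == 6 (64-byte WQEs) and 4K pages, WQE index 70 in
 * get_wqe() below lives at byte offset 70 << 6 == 4480, i.e. in
 * queue.page_list[1] at offset 4480 & (PAGE_SIZE - 1) == 384.
 */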
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
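
/*
 * Editorial note: per the struct definition above, the low 6 bits of
 * wqe_base_ds hold the descriptor size in 16-byte units, hence the
 * 1 << (srq->wqe_shift - 4) below; e.g. wqe_shift == 6 (64-byte WQEs)
 * encodes a descriptor size of 4.
 */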
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context,
					 struct ib_udata *udata)
{
	struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);

	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (udata)
		context->uar = cpu_to_be32(ucontext->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}
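
/*
 * Editorial note: state_logsize_srqn packs ilog2(srq->max) into the
 * top byte above the SRQ number, and logstride_usrpage packs the log2
 * WQE stride (relative to 16 bytes) into bits 31:29 above the UAR
 * page index, as set up below.
 */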
static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context,
					 struct ib_udata *udata)
{
	struct mthca_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (udata)
		context->logstride_usrpage |= cpu_to_be32(ucontext->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq, struct ib_udata *udata)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (udata)
		return 0;

	srq->wrid = kmalloc_array(srq->max, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq,
		    struct ib_udata *udata)
{
	struct mthca_mailbox *mailbox;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr  > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max      = attr->max_wr;
	srq->max_gs   = attr->max_sge;
	srq->counter  = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;
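
	/*
	 * Editorial note: srq->max is now one entry larger than the
	 * requested max_wr (and rounded up to a power of two on
	 * mem-free HCAs), which is why attr->max_wr is reported back
	 * as srq->max - 1 on success below.
	 */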

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));
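
	/*
	 * Editorial example (assuming the usual 16-byte mthca_next_seg
	 * and mthca_data_seg): max_gs == 4 gives 16 + 4 * 16 == 80
	 * bytes, rounded up to ds == 128 and hence wqe_shift == 7.
	 */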
	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!udata) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq, udata);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf, udata);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf, udata);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

err_out_free_buf:
	if (!udata)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!udata && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);
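
	/*
	 * Editorial note: the decrement above drops the initial
	 * reference taken in mthca_alloc_srq(); mthca_srq_event()
	 * takes extra references while dispatching events, so the
	 * wait below ensures no event handler still uses the SRQ
	 * before its buffers are freed.
	 */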
	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret = 0;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
		mutex_unlock(&srq->mutex);
	}

	return ret;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}
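
/*
 * Editorial note: free WQEs form a singly linked list threaded through
 * the imm field (see wqe_to_link()), with srq->first_free as the head
 * consumed by the post_recv paths and srq->last_free as the tail;
 * mthca_free_srq_wqe() below appends a completed WQE at that tail.
 */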
/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}
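
/*
 * Editorial note: on Tavor, each newly posted WQE is chained to its
 * predecessor by setting the previous WQE's doorbell bit (the
 * MTHCA_NEXT_DBD store below), and receive doorbells are rung through
 * the kernel UAR via mthca_write64().
 */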
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind       = srq->first_free;
		wqe       = get_wqe(srq, ind);
		next_ind  = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
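
		/*
		 * Editorial note: the request count is carried in the
		 * low byte of the doorbell ((srqn << 8) | nreq), so
		 * descriptors are flushed in batches of at most
		 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests.
		 */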
		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			      const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind = srq->first_free;
		wqe = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}
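
	/*
	 * Editorial note: unlike Tavor, mem-free (Arbel) hardware polls
	 * a doorbell record in memory, so posting finishes by storing
	 * the updated WQE counter to *srq->db rather than by ringing a
	 * memory-mapped doorbell.
	 */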
	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}
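
/*
 * Editorial example (assuming 16-byte mthca_next_seg and
 * mthca_data_seg): with max_desc_sz == 1000, the largest power of 2
 * that fits is 512, so mthca_max_srq_sge() above would allow at most
 * (512 - 16) / 16 == 31 scatter entries on Tavor.
 */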

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}