/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_post_srq_receive - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 */
int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	unsigned long flags;
	int ret;

	for (; wr; wr = wr->next) {
		struct ipath_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned) wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			ret = -EINVAL;
			goto bail;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		/* The ring is full if advancing head would catch up with tail. */
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			ret = -ENOMEM;
			goto bail;
		}

		wqe = get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	ret = 0;

bail:
	return ret;
}
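
/*
 * A minimal sketch (kept under #if 0, not part of the driver): how a kernel
 * consumer might post a single receive to an SRQ through the generic verbs
 * call ib_post_srq_recv(), which ends up in ipath_post_srq_receive() above.
 * The helper name and the buffer address/length/lkey are hypothetical.
 */
#if 0
static int example_post_one_receive(struct ib_srq *srq, u64 buf_addr,
				    u32 buf_len, u32 lkey)
{
	struct ib_sge sge = {
		.addr   = buf_addr,
		.length = buf_len,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.next    = NULL,
		.wr_id   = buf_addr,	/* returned in the completion */
		.sg_list = &sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* Fails with -ENOMEM if the SRQ ring is full. */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
#endif
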
/**
 * ipath_create_srq - create a shared receive queue
 * @ibpd: the protection domain of the SRQ to create
 * @srq_init_attr: the attributes of the SRQ
 * @udata: data from libipathverbs when creating a user SRQ
 */
struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibpd->device);
	struct ipath_srq *srq;
	u32 sz;
	struct ib_srq *ret;

	if (srq_init_attr->attr.max_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
		ret = ERR_PTR(-EINVAL);
		goto done;
	}

	srq = kmalloc(sizeof(*srq), GFP_KERNEL);
	if (!srq) {
		ret = ERR_PTR(-ENOMEM);
		goto done;
	}

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 * One extra entry is allocated so that a full ring still leaves
	 * head != tail.
	 */
	srq->rq.size = srq_init_attr->attr.max_wr + 1;
	srq->rq.max_sge = srq_init_attr->attr.max_sge;
	sz = sizeof(struct ib_sge) * srq->rq.max_sge +
		sizeof(struct ipath_rwqe);
	srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
	if (!srq->rq.wq) {
		ret = ERR_PTR(-ENOMEM);
		goto bail_srq;
	}

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;
		u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;

		srq->ip =
		    ipath_create_mmap_info(dev, s,
					   ibpd->uobject->context,
					   srq->rq.wq);
		if (!srq->ip) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_wq;
		}

		err = ib_copy_to_udata(udata, &srq->ip->offset,
				       sizeof(srq->ip->offset));
		if (err) {
			ret = ERR_PTR(err);
			goto bail_ip;
		}
	} else
		srq->ip = NULL;

	/*
	 * ib_create_srq() will initialize srq->ibsrq.
	 */
	spin_lock_init(&srq->rq.lock);
	srq->rq.wq->head = 0;
	srq->rq.wq->tail = 0;
	srq->limit = srq_init_attr->attr.srq_limit;

	spin_lock(&dev->n_srqs_lock);
	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
		spin_unlock(&dev->n_srqs_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_srqs_allocated++;
	spin_unlock(&dev->n_srqs_lock);

	if (srq->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &srq->ibsrq;
	goto done;

bail_ip:
	kfree(srq->ip);
bail_wq:
	vfree(srq->rq.wq);
bail_srq:
	kfree(srq);
done:
	return ret;
}
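
/*
 * A minimal sketch (kept under #if 0, not part of the driver): creating a
 * kernel SRQ through the generic verbs call ib_create_srq(), which reaches
 * ipath_create_srq() above with udata == NULL.  The helper name, the sizes,
 * and the NULL event handler are hypothetical choices.
 */
#if 0
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr init_attr = {
		.event_handler = NULL,		/* no SRQ limit events wanted */
		.srq_context   = NULL,
		.attr = {
			.max_wr    = 256,	/* ring holds up to max_wr WQEs */
			.max_sge   = 2,
			.srq_limit = 0,		/* 0 disarms the limit event */
		},
	};

	/* Returns ERR_PTR(-EINVAL) if max_wr is 0 or a device limit is exceeded. */
	return ib_create_srq(pd, &init_attr);
}
#endif
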
/**
* ipath_modify_srq - modify a shared receive queue
 * @ibsrq: the SRQ to modify
 * @attr: the new attributes of the SRQ
 * @attr_mask: indicates which attributes to modify
 * @udata: user data for ipathverbs.so
 */
int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_rwq *wq;
	int ret = 0;

	if (attr_mask & IB_SRQ_MAX_WR) {
		struct ipath_rwq *owq;
		struct ipath_rwqe *p;
		u32 sz, size, n, head, tail;

		/* Check that the requested sizes are below the limits. */
		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
		    ((attr_mask & IB_SRQ_LIMIT) ?
		     attr->srq_limit : srq->limit) > attr->max_wr) {
			ret = -EINVAL;
			goto bail;
		}

		sz = sizeof(struct ipath_rwqe) +
			srq->rq.max_sge * sizeof(struct ib_sge);
		size = attr->max_wr + 1;
		wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
		if (!wq) {
			ret = -ENOMEM;
			goto bail;
		}

		/* Check that we can write the offset to mmap. */
		if (udata && udata->inlen >= sizeof(__u64)) {
			__u64 offset_addr;
			__u64 offset = 0;

			ret = ib_copy_from_udata(&offset_addr, udata,
						 sizeof(offset_addr));
			if (ret)
				goto bail_free;
			udata->outbuf =
				(void __user *) (unsigned long) offset_addr;
			ret = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (ret)
				goto bail_free;
		}

		spin_lock_irq(&srq->rq.lock);
		/*
		 * Validate the head and tail pointer values and compute
		 * the number of remaining WQEs.
		 */
		owq = srq->rq.wq;
		head = owq->head;
		if (head >= srq->rq.size)
			head = 0;
		tail = owq->tail;
		if (tail >= srq->rq.size)
			tail = 0;
		n = head;
		if (n < tail)
			n += srq->rq.size - tail;
		else
			n -= tail;
		if (size <= n) {
			ret = -EINVAL;
			goto bail_unlock;
		}
		n = 0;
		p = wq->wq;
		while (tail != head) {
			struct ipath_rwqe *wqe;
			int i;

			/* Copy each pending WQE from the old ring to the new one. */
			wqe = get_rwqe_ptr(&srq->rq, tail);
			p->wr_id = wqe->wr_id;
			p->num_sge = wqe->num_sge;
			for (i = 0; i < wqe->num_sge; i++)
				p->sg_list[i] = wqe->sg_list[i];
			n++;
			p = (struct ipath_rwqe *)((char *) p + sz);
			if (++tail >= srq->rq.size)
				tail = 0;
		}
		srq->rq.wq = wq;
		srq->rq.size = size;
		wq->head = n;
		wq->tail = 0;
		if (attr_mask & IB_SRQ_LIMIT)
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);

		vfree(owq);

		if (srq->ip) {
			struct ipath_mmap_info *ip = srq->ip;
			struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
			u32 s = sizeof(struct ipath_rwq) + size * sz;

			ipath_update_mmap_info(dev, ip, s, wq);

			/*
			 * Return the offset to mmap.
			 * See ipath_mmap() for details.
			 */
			if (udata && udata->inlen >= sizeof(__u64)) {
				ret = ib_copy_to_udata(udata, &ip->offset,
						       sizeof(ip->offset));
				if (ret)
					goto bail;
			}

			spin_lock_irq(&dev->pending_lock);
			if (list_empty(&ip->pending_mmaps))
				list_add(&ip->pending_mmaps,
					 &dev->pending_mmaps);
			spin_unlock_irq(&dev->pending_lock);
		}
	} else if (attr_mask & IB_SRQ_LIMIT) {
		spin_lock_irq(&srq->rq.lock);
		if (attr->srq_limit >= srq->rq.size)
			ret = -EINVAL;
		else
			srq->limit = attr->srq_limit;
		spin_unlock_irq(&srq->rq.lock);
	}
	goto bail;

bail_unlock:
	spin_unlock_irq(&srq->rq.lock);
bail_free:
	vfree(wq);
bail:
	return ret;
}
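
/*
 * A minimal sketch (kept under #if 0, not part of the driver): resizing an
 * SRQ and arming its limit through the generic verbs call ib_modify_srq(),
 * which reaches ipath_modify_srq() above.  The helper name and the size and
 * limit values are hypothetical; the new size must exceed the number of WQEs
 * still pending on the ring, and the limit must not exceed max_wr.
 */
#if 0
static int example_resize_and_arm_srq(struct ib_srq *srq)
{
	struct ib_srq_attr attr = {
		.max_wr    = 512,	/* new ring capacity */
		.srq_limit = 16,	/* event fires when occupancy drops below this */
	};

	return ib_modify_srq(srq, &attr, IB_SRQ_MAX_WR | IB_SRQ_LIMIT);
}
#endif
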
int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct ipath_srq *srq = to_isrq(ibsrq);

	attr->max_wr = srq->rq.size - 1;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

/**
 * ipath_destroy_srq - destroy a shared receive queue
 * @ibsrq: the SRQ to destroy
 */
int ipath_destroy_srq(struct ib_srq *ibsrq)
{
	struct ipath_srq *srq = to_isrq(ibsrq);
	struct ipath_ibdev *dev = to_idev(ibsrq->device);

	spin_lock(&dev->n_srqs_lock);
	dev->n_srqs_allocated--;
	spin_unlock(&dev->n_srqs_lock);
	if (srq->ip)
		kref_put(&srq->ip->ref, ipath_release_mmap_info);
	else
		vfree(srq->rq.wq);
	kfree(srq);

	return 0;
}