/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
*
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
*
* - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
*
* - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
*
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*/

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"

/**
 * ipath_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is a solicited entry
 *
 * This may be called with qp->s_lock held.
 */
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
{
        struct ipath_cq_wc *wc = cq->queue;
        unsigned long flags;
        u32 head;
        u32 next;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Note that the head pointer might be writable by user processes.
         * Take care to verify it is a sane value.
         */
        head = wc->head;
        if (head >= (unsigned) cq->ibcq.cqe) {
                head = cq->ibcq.cqe;
                next = 0;
        } else
                next = head + 1;
        if (unlikely(next == wc->tail)) {
                spin_unlock_irqrestore(&cq->lock, flags);
                if (cq->ibcq.event_handler) {
                        struct ib_event ev;

                        ev.device = cq->ibcq.device;
                        ev.element.cq = &cq->ibcq;
                        ev.event = IB_EVENT_CQ_ERR;
                        cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
                }
                return;
        }
        wc->queue[head] = *entry;
        wc->head = next;

        if (cq->notify == IB_CQ_NEXT_COMP ||
            (cq->notify == IB_CQ_SOLICITED && solicited)) {
                cq->notify = IB_CQ_NONE;
                cq->triggered++;
                /*
                 * This will cause send_complete() to be called in
                 * another thread.
                 */
                tasklet_hi_schedule(&cq->comptask);
        }

        spin_unlock_irqrestore(&cq->lock, flags);

        if (entry->status != IB_WC_SUCCESS)
                to_idev(cq->ibcq.device)->n_wqe_errs++;
}
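
/*
 * Illustrative note, not driver code: the queue is sized with one
 * extra slot (indices 0 .. cqe are all valid, as the clamping above
 * shows), so head == tail means empty and the incremented head
 * hitting tail means full.  A hypothetical helper computing the
 * occupancy under that convention would look like:
 *
 *      static inline u32 cq_occupancy(u32 head, u32 tail, u32 cqe)
 *      {
 *              return (head >= tail) ? head - tail
 *                                    : cqe + 1 + head - tail;
 *      }
 *
 * which matches the arithmetic ipath_resize_cq() uses when copying
 * entries to a resized queue.
 */
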
/**
 * ipath_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * Returns the number of completion entries polled.
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
*/
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *wc = cq->queue;
        unsigned long flags;
        int npolled;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
                if (wc->tail == wc->head)
                        break;
                *entry = wc->queue[wc->tail];
                if (wc->tail >= cq->ibcq.cqe)
                        wc->tail = 0;
                else
                        wc->tail++;
        }
        spin_unlock_irqrestore(&cq->lock, flags);

        return npolled;
}
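
/*
 * Illustrative consumer-side sketch (hypothetical, not part of this
 * driver): a kernel ULP reaps completions through the generic verbs
 * entry point, which dispatches to ipath_poll_cq() above.
 * handle_error() stands in for consumer-specific error handling:
 *
 *      struct ib_wc wc[8];
 *      int i, n;
 *
 *      n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc);
 *      for (i = 0; i < n; i++)
 *              if (wc[i].status != IB_WC_SUCCESS)
 *                      handle_error(&wc[i]);
 */
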
static void send_complete(unsigned long data)
{
        struct ipath_cq *cq = (struct ipath_cq *) data;

        /*
         * The completion handler will most likely rearm the notification
         * and poll for all pending entries.  If a new completion entry
         * is added while we are in this routine, tasklet_hi_schedule()
         * won't call us again until we return so we check triggered to
         * see if we need to call the handler again.
         */
        for (;;) {
                u8 triggered = cq->triggered;

                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

                if (cq->triggered == triggered)
                        return;
        }
}
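
/*
 * Illustrative completion-handler pattern (hypothetical consumer code,
 * not part of this driver): handlers typically rearm notification
 * before draining, so an entry added mid-poll still produces a fresh
 * callback via ipath_cq_enter() above.  process_wc() stands in for
 * whatever the consumer does with a completion:
 *
 *      static void my_comp_handler(struct ib_cq *cq, void *ctx)
 *      {
 *              struct ib_wc wc;
 *
 *              ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *              while (ib_poll_cq(cq, 1, &wc) > 0)
 *                      process_wc(&wc);
 *      }
 */
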
/**
 * ipath_create_cq - create a completion queue
 * @ibdev: the device this completion queue is attached to
 * @entries: the minimum size of the completion queue
 * @context: the process context creating the CQ
 * @udata: user data to which the queue's mmap offset is returned
 *
 * Returns a pointer to the completion queue or negative errno values
 * for failure.
 *
 * Called by ib_create_cq() in the generic verbs code.
*/
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
                              struct ib_ucontext *context,
                              struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibdev);
        struct ipath_cq *cq;
        struct ipath_cq_wc *wc;
        struct ib_cq *ret;

        if (entries < 1 || entries > ib_ipath_max_cqes) {
                ret = ERR_PTR(-EINVAL);
                goto done;
        }

        if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /* Allocate the completion queue structure. */
        cq = kmalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                ret = ERR_PTR(-ENOMEM);
                goto done;
        }

        /*
         * Allocate the completion queue entries and head/tail pointers.
         * This is allocated separately so that it can be resized and
         * also mapped into user space.
         * We need to use vmalloc() in order to support mmap and large
         * numbers of entries.
         */
        wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * entries);
        if (!wc) {
                ret = ERR_PTR(-ENOMEM);
                goto bail_cq;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                struct ipath_mmap_info *ip;
                __u64 offset = (__u64) wc;
                int err;

                err = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (err) {
                        ret = ERR_PTR(err);
                        goto bail_wc;
                }

                /* Allocate info for ipath_mmap(). */
                ip = kmalloc(sizeof(*ip), GFP_KERNEL);
                if (!ip) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_wc;
                }
                cq->ip = ip;
                ip->context = context;
                ip->obj = wc;
                kref_init(&ip->ref);
                ip->mmap_cnt = 0;
                ip->size = PAGE_ALIGN(sizeof(*wc) +
                                      sizeof(struct ib_wc) * entries);
                spin_lock_irq(&dev->pending_lock);
                ip->next = dev->pending_mmaps;
                dev->pending_mmaps = ip;
                spin_unlock_irq(&dev->pending_lock);
        } else
                cq->ip = NULL;

        /*
         * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
         * The number of entries should be >= the number requested or return
         * an error.
         */
        cq->ibcq.cqe = entries;
        cq->notify = IB_CQ_NONE;
        cq->triggered = 0;
        spin_lock_init(&cq->lock);
        tasklet_init(&cq->comptask, send_complete, (unsigned long) cq);
        wc->head = 0;
        wc->tail = 0;
        cq->queue = wc;

        ret = &cq->ibcq;

        dev->n_cqs_allocated++;
        goto done;

bail_wc:
        vfree(wc);

bail_cq:
        kfree(cq);

done:
        return ret;
}
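
/*
 * Illustrative userspace counterpart (hypothetical, assuming the
 * libipathverbs conventions): the library reads the __u64 offset
 * returned through udata and maps the shared queue, roughly:
 *
 *      wc = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                context->cmd_fd, (off_t) offset);
 *
 * After this, wc->head and wc->tail are shared with the kernel, which
 * is why both ipath_cq_enter() and ipath_resize_cq() treat them as
 * untrusted and clamp them to sane values.
 */
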
/**
 * ipath_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 *
 * Returns 0 for success.
 *
 * Called by ib_destroy_cq() in the generic verbs code.
*/
int ipath_destroy_cq(struct ib_cq *ibcq)
{
        struct ipath_ibdev *dev = to_idev(ibcq->device);
        struct ipath_cq *cq = to_icq(ibcq);

        tasklet_kill(&cq->comptask);
        dev->n_cqs_allocated--;
        if (cq->ip)
                kref_put(&cq->ip->ref, ipath_release_mmap_info);
        else
                vfree(cq->queue);
        kfree(cq);

        return 0;
}

/**
 * ipath_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify: the type of notification to request
 *
 * Returns 0 for success.
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
*/
int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
        struct ipath_cq *cq = to_icq(ibcq);
        unsigned long flags;

        spin_lock_irqsave(&cq->lock, flags);

        /*
         * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
         * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2
         * of the InfiniBand Architecture Specification).
         */
        if (cq->notify != IB_CQ_NEXT_COMP)
                cq->notify = notify;

        spin_unlock_irqrestore(&cq->lock, flags);

        return 0;
}

/**
 * ipath_resize_cq - change the size of the completion queue
 * @ibcq: the completion queue to resize
 * @cqe: the new minimum number of entries
 * @udata: user data to which the queue's new mmap offset is returned
 *
 * Returns 0 for success.
 */
int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
        struct ipath_cq *cq = to_icq(ibcq);
        struct ipath_cq_wc *old_wc = cq->queue;
        struct ipath_cq_wc *wc;
        u32 head, tail, n;
        int ret;

        if (cqe < 1 || cqe > ib_ipath_max_cqes) {
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Need to use vmalloc() if we want to support large #s of entries.
         */
        wc = vmalloc_user(sizeof(*wc) + sizeof(struct ib_wc) * cqe);
        if (!wc) {
                ret = -ENOMEM;
                goto bail;
        }

        /*
         * Return the address of the WC as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                __u64 offset = (__u64) wc;

                ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
                if (ret)
                        goto bail;
        }

        spin_lock_irq(&cq->lock);
        /*
         * Make sure head and tail are sane since they
         * might be user writable.
         */
        head = old_wc->head;
        if (head > (u32) cq->ibcq.cqe)
                head = (u32) cq->ibcq.cqe;
        tail = old_wc->tail;
        if (tail > (u32) cq->ibcq.cqe)
                tail = (u32) cq->ibcq.cqe;
        if (head < tail)
                n = cq->ibcq.cqe + 1 + head - tail;
        else
                n = head - tail;
        if (unlikely((u32)cqe < n)) {
                spin_unlock_irq(&cq->lock);
                vfree(wc);
                ret = -EOVERFLOW;
                goto bail;
        }
        for (n = 0; tail != head; n++) {
                wc->queue[n] = old_wc->queue[tail];
                if (tail == (u32) cq->ibcq.cqe)
                        tail = 0;
                else
                        tail++;
        }
        cq->ibcq.cqe = cqe;
        wc->head = n;
        wc->tail = 0;
        cq->queue = wc;
        spin_unlock_irq(&cq->lock);

        vfree(old_wc);

        if (cq->ip) {
                struct ipath_ibdev *dev = to_idev(ibcq->device);
                struct ipath_mmap_info *ip = cq->ip;

                ip->obj = wc;
                ip->size = PAGE_ALIGN(sizeof(*wc) +
                                      sizeof(struct ib_wc) * cqe);
                spin_lock_irq(&dev->pending_lock);
                ip->next = dev->pending_mmaps;
                dev->pending_mmaps = ip;
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = 0;

bail:
        return ret;
}
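
/*
 * Worked example for the resize arithmetic above (illustrative): with
 * cq->ibcq.cqe == 4 the ring has five slots.  If head == 1 and
 * tail == 3 the queue has wrapped, so
 *
 *      n = cqe + 1 + head - tail = 4 + 1 + 1 - 3 = 3
 *
 * entries (indices 3, 4 and 0) are copied, and the new queue starts
 * with head == 3 and tail == 0.
 */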