/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/ratelimit.h>
#include "iw_cxgb4.h"

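/*
 * Seed the qid table with only the first qid of each (qpmask + 1)
 * aligned block; the remaining qids in a block are handed out from the
 * per-ucontext free lists in c4iw_get_cqid()/c4iw_get_qpid() below.
 */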
static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
{
	u32 i;

	if (c4iw_id_table_alloc(&rdev->resource.qid_table,
				rdev->lldi.vr->qp.start,
				rdev->lldi.vr->qp.size,
				rdev->lldi.vr->qp.size, 0))
		return -ENOMEM;

	for (i = rdev->lldi.vr->qp.start;
	     i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
		if (!(i & rdev->qpmask))
			c4iw_id_free(&rdev->resource.qid_table, i);
	return 0;
}

/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
		       u32 nr_pdid, u32 nr_srqt)
{
	int err = 0;

	err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
				  C4IW_ID_TABLE_F_RANDOM);
	if (err)
		goto tpt_err;
	err = c4iw_init_qid_table(rdev);
	if (err)
		goto qid_err;
	err = c4iw_id_table_alloc(&rdev->resource.pdid_table, 0,
				  nr_pdid, 1, 0);
	if (err)
		goto pdid_err;
	if (!nr_srqt)
		err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
					  1, 1, 0);
	else
		err = c4iw_id_table_alloc(&rdev->resource.srq_table, 0,
					  nr_srqt, 0, 0);
	if (err)
		goto srq_err;
	return 0;
srq_err:
	c4iw_id_table_free(&rdev->resource.pdid_table);
pdid_err:
	c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
	c4iw_id_table_free(&rdev->resource.tpt_table);
tpt_err:
	return -ENOMEM;
}

/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct c4iw_id_table *id_table)
{
	u32 entry;

	entry = c4iw_id_alloc(id_table);
	if (entry == (u32)(-1))
		return 0;
	return entry;
}

void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{
	pr_debug("entry 0x%x\n", entry);
	c4iw_id_free(id_table, entry);
}

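/*
 * Qids come out of the qid table in blocks of (qpmask + 1) contiguous
 * ids that all map to the same doorbell/GTS page.  When a fresh block
 * is pulled from the table, the caller gets the first id and the rest
 * of the block is cached on the per-ucontext cqid and qpid free lists
 * for later reuse.
 */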
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid)
			goto out;
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}

		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	pr_debug("qid 0x%x\n", qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;
	pr_debug("qid 0x%x\n", qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->cqids);
	mutex_unlock(&uctx->lock);
}

u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_table);
		if (!qid) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.fail++;
			mutex_unlock(&rdev->stats.lock);
			goto out;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.qid.cur += rdev->qpmask + 1;
		mutex_unlock(&rdev->stats.lock);
		for (i = qid + 1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}

		/*
		 * now put the same ids on the cq list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->cqids);
		for (i = qid; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	pr_debug("qid 0x%x\n", qid);
	mutex_lock(&rdev->stats.lock);
	if (rdev->stats.qid.cur > rdev->stats.qid.max)
		rdev->stats.qid.max = rdev->stats.qid.cur;
	mutex_unlock(&rdev->stats.lock);
	return qid;
}

void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return;
	pr_debug("qid 0x%x\n", qid);
	entry->qid = qid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
	c4iw_id_table_free(&rscp->tpt_table);
	c4iw_id_table_free(&rscp->qid_table);
	c4iw_id_table_free(&rscp->pdid_table);
}

/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8			/* 256B == min PBL size (32 entries) */

u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
			rdev->stats.pbl.max = rdev->stats.pbl.cur;
		kref_get(&rdev->pbl_kref);
	} else
		rdev->stats.pbl.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

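/*
 * Each successful pblpool allocation takes a reference on pbl_kref, and
 * c4iw_pblpool_free()/c4iw_pblpool_destroy() drop one, so the pool is
 * only torn down (and pbl_compl signalled) once every outstanding
 * allocation has been returned.
 */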
static void destroy_pblpool(struct kref *kref)
{
	struct c4iw_rdev *rdev;

	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
	gen_pool_destroy(rdev->pbl_pool);
	complete(&rdev->pbl_compl);
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
	kref_put(&rdev->pbl_kref, destroy_pblpool);
}

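/*
 * Carve the PBL address range into the gen_pool.  gen_pool_add() can
 * fail for a very large chunk (its internal bitmap allocation grows
 * with the chunk size), so on failure the chunk is halved and retried
 * until it fits or shrinks below 1024 << MIN_PBL_SHIFT, at which point
 * the remainder is abandoned with a warning.
 */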
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			pr_debug("failed to add PBL chunk (%x/%x)\n",
				 pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				pr_warn("Failed to add all PBL chunks (%x/%x)\n",
					pbl_start, pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			pr_debug("added PBL chunk (%x/%x)\n",
				 pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}

	return 0;
}

void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
	kref_put(&rdev->pbl_kref, destroy_pblpool);
}

/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10		/* 1KB == min RQT size (16 entries) */

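/*
 * Sizes passed to the rqtpool routines are in units of 64B RQT entries,
 * hence the "<< 6" when converting to bytes for the gen_pool.
 */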
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
	if (!addr)
		pr_warn_ratelimited("%s: Out of RQT memory\n",
				    pci_name(rdev->lldi.pdev));
	mutex_lock(&rdev->stats.lock);
	if (addr) {
		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
			rdev->stats.rqt.max = rdev->stats.rqt.cur;
		kref_get(&rdev->rqt_kref);
	} else
		rdev->stats.rqt.fail++;
	mutex_unlock(&rdev->stats.lock);
	return (u32)addr;
}

static void destroy_rqtpool(struct kref *kref)
{
	struct c4iw_rdev *rdev;

	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
	gen_pool_destroy(rdev->rqt_pool);
	complete(&rdev->rqt_compl);
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size << 6);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
	kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;
	int skip = 0;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	/*
	 * If SRQs are supported, then never use the first RQE from
	 * the RQT region. This is because HW uses RQT index 0 as NULL.
	 */
	if (rdev->lldi.vr->srq.size)
		skip = T4_RQT_ENTRY_SIZE;

	rqt_start = rdev->lldi.vr->rq.start + skip;
	rqt_chunk = rdev->lldi.vr->rq.size - skip;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			pr_debug("failed to add RQT chunk (%x/%x)\n",
				 rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				pr_warn("Failed to add all RQT chunks (%x/%x)\n",
					rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			pr_debug("added RQT chunk (%x/%x)\n",
				 rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}

	return 0;
}

void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
	kref_put(&rdev->rqt_kref, destroy_rqtpool);
}

int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev)
{
	int idx;

	idx = c4iw_id_alloc(&rdev->resource.srq_table);
	mutex_lock(&rdev->stats.lock);
	if (idx == -1) {
		rdev->stats.srqt.fail++;
		mutex_unlock(&rdev->stats.lock);
		return -ENOMEM;
	}
	rdev->stats.srqt.cur++;
	if (rdev->stats.srqt.cur > rdev->stats.srqt.max)
		rdev->stats.srqt.max = rdev->stats.srqt.cur;
	mutex_unlock(&rdev->stats.lock);
	return idx;
}

void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx)
{
	c4iw_id_free(&rdev->resource.srq_table, idx);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.srqt.cur--;
	mutex_unlock(&rdev->stats.lock);
}

/*
 * On-Chip QP Memory.
 */

#define MIN_OCQP_SHIFT 12		/* 4KB == min ocqp size */

u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
	unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

	pr_debug("addr 0x%x size %d\n", (u32)addr, size);
	if (addr) {
		mutex_lock(&rdev->stats.lock);
		rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
		if (rdev->stats.ocqp.cur > rdev->stats.ocqp.max)
			rdev->stats.ocqp.max = rdev->stats.ocqp.cur;
		mutex_unlock(&rdev->stats.lock);
	}
	return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
	pr_debug("addr 0x%x size %d\n", addr, size);
	mutex_lock(&rdev->stats.lock);
	rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
	mutex_unlock(&rdev->stats.lock);
	gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}

int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
	unsigned start, chunk, top;

	rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
	if (!rdev->ocqp_pool)
		return -ENOMEM;

	start = rdev->lldi.vr->ocq.start;
	chunk = rdev->lldi.vr->ocq.size;
	top = start + chunk;

	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
			pr_debug("failed to add OCQP chunk (%x/%x)\n",
				 start, chunk);
			if (chunk <= 1024 << MIN_OCQP_SHIFT) {
				pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
					start, top - start);
				return 0;
			}
			chunk >>= 1;
		} else {
			pr_debug("added OCQP chunk (%x/%x)\n",
				 start, chunk);
			start += chunk;
		}
	}
	return 0;
}

void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
	gen_pool_destroy(rdev->ocqp_pool);
}