/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in chunks as big as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE     = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE   = 1 << 18
};
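
/*
 * An ICM area is kept as a list of chunks; each chunk carries up to
 * MLX4_ICM_CHUNK_LEN scatterlist entries (see icm.h).
 * mlx4_free_icm_pages() undoes the streaming DMA mapping, if one was
 * set up, and returns the pages behind each entry to the page
 * allocator.
 */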
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        if (chunk->nsg > 0)
                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                             PCI_DMA_BIDIRECTIONAL);

        for (i = 0; i < chunk->npages; ++i)
                __free_pages(sg_page(&chunk->mem[i]),
                             get_order(chunk->mem[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->npages; ++i)
                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
                                  lowmem_page_address(sg_page(&chunk->mem[i])),
                                  sg_dma_address(&chunk->mem[i]));
}
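
/*
 * Free every chunk of an ICM area, using whichever of the two free
 * helpers matches the way the area was originally allocated.
 */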
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
        struct mlx4_icm_chunk *chunk, *tmp;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (coherent)
                        mlx4_free_icm_coherent(dev, chunk);
                else
                        mlx4_free_icm_pages(dev, chunk);

                kfree(chunk);
        }

        kfree(icm);
}
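
/*
 * Allocate 2^order contiguous pages and describe them with a single
 * scatterlist entry; in the non-coherent case the entry is DMA-mapped
 * later, once its chunk fills up or the allocation completes.
 */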
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
        struct page *page;

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return -ENOMEM;

        sg_set_page(mem, page, PAGE_SIZE << order, 0);
        return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
                                   int order, gfp_t gfp_mask)
{
        void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
                                       &sg_dma_address(mem), gfp_mask);
        if (!buf)
                return -ENOMEM;

        sg_set_buf(mem, buf, PAGE_SIZE << order);
        BUG_ON(mem->offset);
        sg_dma_len(mem) = PAGE_SIZE << order;
        return 0;
}
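
/*
 * Allocate an ICM area of at least npages pages.  We try the largest
 * order first (MLX4_ICM_ALLOC_SIZE per allocation, capped by the
 * pages still needed) and halve the order whenever the allocator
 * fails, so a fragmented system degrades gracefully instead of
 * failing outright.  Non-coherent chunks are DMA-mapped as soon as
 * they fill up; a final partial chunk is mapped after the loop.
 */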
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask, int coherent)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;
        int ret;

        /* We use sg_set_buf for coherent allocs, which assumes low memory */
        BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return NULL;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                if (coherent)
                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
                                                      &chunk->mem[chunk->npages],
                                                      cur_order, gfp_mask);
                else
                        ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
                                                   cur_order, gfp_mask);

                if (!ret) {
                        ++chunk->npages;

                        if (coherent)
                                ++chunk->nsg;
                        else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (!coherent && chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm, coherent);
        return NULL;
}
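
/*
 * Wrappers for the MAP_ICM/UNMAP_ICM firmware commands, which attach
 * and detach host memory to and from the device's ICM virtual space.
 */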
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
}
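
/*
 * Map a single page: build a one-entry mailbox holding the (ICM
 * virtual address, DMA address) pair and hand it to MAP_ICM.
 */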
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        __be64 *inbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        inbox[0] = cpu_to_be64(virt);
        inbox[1] = cpu_to_be64(dma_addr);

        err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);

        if (!err)
                mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
                         (unsigned long long) dma_addr, (unsigned long long) virt);

        return err;
}
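
/*
 * The auxiliary ICM area is for the firmware's own use; passing a
 * virtual address of -1 tells mlx4_map_cmd() not to include virtual
 * addresses in the mailbox entries.
 */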
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}
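
/*
 * Take a reference on the table chunk that holds object obj,
 * allocating and mapping the chunk on first use.  Chunks are
 * MLX4_TABLE_CHUNK_SIZE bytes, so each one covers a fixed number of
 * objects.
 */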
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                       __GFP_NOWARN, table->coherent);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i;

        i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i], table->coherent);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}
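
/*
 * Return the kernel virtual address of object obj within a lowmem
 * table, walking the chunk list until the byte offset falls inside a
 * scatterlist entry.  If dma_handle is non-NULL, the object's bus
 * address is returned through it as well.
 */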
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
        int idx, offset, dma_offset, i;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (dma_handle && dma_offset >= 0) {
                                if (sg_dma_len(&chunk->mem[i]) > dma_offset)
                                        *dma_handle = sg_dma_address(&chunk->mem[i]) +
                                                dma_offset;
                                dma_offset -= sg_dma_len(&chunk->mem[i]);
                        }
                        /*
                         * DMA mapping can merge pages but not split them,
                         * so if we find the page, dma_handle has already
                         * been assigned above.
                         */
                        if (chunk->mem[i].length > offset) {
                                page = sg_page(&chunk->mem[i]);
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
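
/*
 * Take a reference on every chunk covering objects start..end,
 * unwinding the references already taken if any chunk cannot be
 * allocated.
 */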
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         int start, int end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          int start, int end)
{
        int i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}
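
/*
 * Set up an ICM table: size the chunk array, record the table's
 * geometry, and pre-map the chunks that hold the firmware-reserved
 * objects, pinning them with an extra reference so they are never
 * freed.
 */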
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, int nobj, int reserved,
                        int use_lowmem, int use_coherent)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        table->coherent = use_coherent;
        mutex_init(&table->mutex);

        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = PAGE_ALIGN(nobj * obj_size -
                                                i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN, use_coherent);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], use_coherent);
                }

        return -ENOMEM;
}

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i], table->coherent);
                }

        kfree(table->icm);
}