/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
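/*
 * An mthca_mtt tracks one allocation of MTT (memory translation table)
 * segments: the buddy allocator it came from, the allocation order
 * (log2 of the number of segments) and the index of the first segment.
 */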
struct mthca_mtt {
        struct mthca_buddy *buddy;
        int                 order;
        u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
        __be32 flags;
        __be32 page_size;
        __be32 key;
        __be32 pd;
        __be64 start;
        __be64 length;
        __be32 lkey;
        __be32 window_count;
        __be32 window_count_limit;
        __be64 mtt_seg;
        __be32 mtt_sz;          /* Arbel only */
        u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 <<  9)
#define MTHCA_MPT_FLAG_REGION        (1 <<  8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

#define SINAI_FMR_KEY_INC 0x1000000

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */
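/*
 * For example, with max_order = 2 the allocator starts with one free
 * block of order 2 (four segments).  An order-0 request finds no free
 * order-0 or order-1 block, takes the order-2 block and splits it on
 * the way down: its order-1 buddy (index 1) and order-0 buddy (index 1)
 * are marked free, and segment 0 is returned.  Freeing reverses this:
 * as long as a block's buddy (seg ^ 1) is free at the current order,
 * the two are merged and the search moves up one order.
 */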
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o)
                if (buddy->num_free[o]) {
                        m = 1 << (buddy->max_order - o);
                        seg = find_first_bit(buddy->bits[o], m);
                        if (seg < m)
                                goto found;
                }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);
        --buddy->num_free[o];

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
                ++buddy->num_free[o];
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                --buddy->num_free[order];
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);
        ++buddy->num_free[order];

        spin_unlock(&buddy->lock);
}

static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
                                  GFP_KERNEL);
        if (!buddy->bits || !buddy->num_free)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i],
                            1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);
        buddy->num_free[buddy->max_order] = 1;

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

err_out:
        kfree(buddy->bits);
        kfree(buddy->num_free);

        return -ENOMEM;
}

static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
        kfree(buddy->num_free);
}

static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
                                 struct mthca_buddy *buddy)
{
        u32 seg = mthca_buddy_alloc(buddy, order);

        if (seg == -1)
                return -1;

        if (mthca_is_memfree(dev))
                if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
                                          seg + (1 << order) - 1)) {
                        mthca_buddy_free(buddy, seg, order);
                        seg = -1;
                }

        return seg;
}

static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
                                           struct mthca_buddy *buddy)
{
        struct mthca_mtt *mtt;
        int i;

        if (size <= 0)
                return ERR_PTR(-EINVAL);

        mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->buddy = buddy;
        mtt->order = 0;
        for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
                ++mtt->order;

        mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
        if (mtt->first_seg == -1) {
                kfree(mtt);
                return ERR_PTR(-ENOMEM);
        }

        return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
        return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
        if (!mtt)
                return;

        mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

        mthca_table_put_range(dev, dev->mr_table.mtt_table,
                              mtt->first_seg,
                              mtt->first_seg + (1 << mtt->order) - 1);

        kfree(mtt);
}
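/*
 * Write MTT entries through the WRITE_MTT firmware command.  The first
 * two 64-bit words of the mailbox hold the target MTT address and a
 * reserved word; the actual entries (with the present bit set) start at
 * index 2, and an extra zero entry is appended when the count is odd.
 */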
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                             int start_index, u64 *buffer_list, int list_len)
{
        struct mthca_mailbox *mailbox;
        __be64 *mtt_entry;
        int err = 0;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mtt_entry = mailbox->buf;

        while (list_len > 0) {
                mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
                                           mtt->first_seg * dev->limits.mtt_seg_size +
                                           start_index * 8);
                mtt_entry[1] = 0;
                for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
                        mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
                                                       MTHCA_MTT_FLAG_PRESENT);

                /*
                 * If we have an odd number of entries to write, add
                 * one more dummy entry for firmware efficiency.
                 */
                if (i & 1)
                        mtt_entry[i + 2] = 0;

                err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
                if (err) {
                        mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
                        goto out;
                }

                list_len    -= i;
                start_index += i;
                buffer_list += i;
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

int mthca_write_mtt_size(struct mthca_dev *dev)
{
        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                /*
                 * Be friendly to WRITE_MTT command
                 * and leave two empty slots for the
                 * index and reserved fields of the
                 * mailbox.
                 */
                return PAGE_SIZE / sizeof (u64) - 2;

        /* For Arbel, all MTTs must fit in the same page. */
        return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
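/*
 * Direct MTT writes: on Tavor the entries are written through the
 * ioremapped tavor_fmr.mtt_base window onto the device's MTT table,
 * while on mem-free (Arbel) HCAs the MTT table lives in host memory
 * (ICM), so the entries are written in place and the mapping is synced
 * for DMA around the update.
 */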
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        u64 __iomem *mtts;
        int i;

        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
                start_index * sizeof (u64);
        for (i = 0; i < list_len; ++i)
                mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
                                  mtts + i);
}

static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
                                      struct mthca_mtt *mtt, int start_index,
                                      u64 *buffer_list, int list_len)
{
        __be64 *mtts;
        dma_addr_t dma_handle;
        int i;
        int s = start_index * sizeof (u64);

        /* For Arbel, all MTTs must fit in the same page. */
        BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof (u64) - 1) / PAGE_SIZE);
        /* Require full segments */
        BUG_ON(s % dev->limits.mtt_seg_size);

        mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
                                s / dev->limits.mtt_seg_size, &dma_handle);

        BUG_ON(!mtts);

        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
                                list_len * sizeof (u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
                                   list_len * sizeof (u64), DMA_TO_DEVICE);
}
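/*
 * mthca_write_mtt() picks between the two paths above: if a separate
 * FMR MTT buddy is in use, or FMRs are disabled, it falls back to the
 * WRITE_MTT command; otherwise it writes the table directly, in chunks
 * of at most mthca_write_mtt_size() entries.
 */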
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                    int start_index, u64 *buffer_list, int list_len)
{
        int size = mthca_write_mtt_size(dev);
        int chunk;

        if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
            !(dev->mthca_flags & MTHCA_FLAG_FMR))
                return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);

        while (list_len > 0) {
                chunk = min(size, list_len);
                if (mthca_is_memfree(dev))
                        mthca_arbel_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);
                else
                        mthca_tavor_write_mtt_seg(dev, mtt, start_index,
                                                  buffer_list, chunk);

                list_len    -= chunk;
                start_index += chunk;
                buffer_list += chunk;
        }

        return 0;
}
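/*
 * Conversion between MPT table indices and the memory keys handed out
 * to consumers.  Tavor uses the index directly; Arbel rotates the
 * 32-bit index left by 8 bits (and the inverse rotates right), so e.g.
 * index 0x12345678 becomes key 0x34567812.
 */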
static inline u32 tavor_hw_index_to_key(u32 ind)
{
        return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
        return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
        if (mthca_is_memfree(dev))
                return arbel_hw_index_to_key(ind);
        else
                return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
        if (mthca_is_memfree(dev))
                return arbel_key_to_hw_index(key);
        else
                return tavor_key_to_hw_index(key);
}
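/*
 * With the Sinai memory key optimization enabled, only the low-order 23
 * bits of the allocated key are kept and bit 3 of the original key is
 * moved up to bit 23 of the adjusted key.
 */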
static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                return ((key << 20) & 0x800000) | (key & 0x7fffff);
        else
                return key;
}
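/*
 * Create a memory region: allocate an MPT entry, fill in a mailbox copy
 * of it (page_size holds log2 of the page size minus 12, and regions
 * without an MTT are marked physical) and hand it to the HCA with the
 * SW2HW_MPT command.
 */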
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
        struct mthca_mailbox *mailbox;
        struct mthca_mpt_entry *mpt_entry;
        u32 key;
        int i;
        int err;

        WARN_ON(buffer_size_shift >= 32);

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_table;
        }
        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);
        if (!mr->mtt)
                mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

        mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        mpt_entry->start     = cpu_to_be64(iova);
        mpt_entry->length    = cpu_to_be64(total_size);

        memset(&mpt_entry->lkey, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

        if (mr->mtt)
                mpt_entry->mtt_seg =
                        cpu_to_be64(dev->mr_table.mtt_base +
                                    mr->mtt->first_seg * dev->limits.mtt_seg_size);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox;
        }

        mthca_free_mailbox(dev, mailbox);
        return err;

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           u32 access, struct mthca_mr *mr)
{
        mr->mtt = NULL;
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (err) {
                mthca_free_mtt(dev, mr->mtt);
                return err;
        }

        err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
                             total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
        mthca_table_put(dev, dev->mr_table.mpt_table,
                        key_to_hw_index(dev, lkey));

        mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
        int err;

        err = mthca_HW2SW_MPT(dev, NULL,
                              key_to_hw_index(dev, mr->ibmr.lkey) &
                              (dev->limits.num_mpts - 1));
        if (err)
                mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);

        mthca_free_region(dev, mr->ibmr.lkey);
        mthca_free_mtt(dev, mr->mtt);
}

int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                    u32 access, struct mthca_fmr *mr)
{
        struct mthca_mpt_entry *mpt_entry;
        struct mthca_mailbox *mailbox;
        u64 mtt_seg;
        u32 key, idx;
        int list_len = mr->attr.max_pages;
        int err = -ENOMEM;
        int i;

        if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
                return -EINVAL;

        /* For Arbel, all MTTs must fit in the same page. */
        if (mthca_is_memfree(dev) &&
            mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
                return -EINVAL;

        mr->maps = 0;

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        key = adjust_key(dev, key);
        idx = key & (dev->limits.num_mpts - 1);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;

                mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
                BUG_ON(!mr->mem.arbel.mpt);
        } else
                mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
                        sizeof *(mr->mem.tavor.mpt) * idx;

        mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_out_table;
        }

        mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;

        if (mthca_is_memfree(dev)) {
                mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
                                                      mr->mtt->first_seg,
                                                      &mr->mem.arbel.dma_handle);
                BUG_ON(!mr->mem.arbel.mtts);
        } else
                mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_free_mtt;
        }

        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);

        mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        memset(&mpt_entry->start, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
        mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1));
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox_free;
        }

        mthca_free_mailbox(dev, mailbox);
        return 0;

err_out_mailbox_free:
        mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mthca_free_region(dev, fmr->ibmr.lkey);
        mthca_free_mtt(dev, fmr->mtt);

        return 0;
}
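/*
 * Fast memory region mapping: each remap advances the key (by
 * dev->limits.num_mpts, or SINAI_FMR_KEY_INC on Sinai devices), so a
 * stale key from an earlier mapping no longer matches the MPT, then
 * rewrites the MTT entries and the MPT fields while the entry is in
 * software ownership before handing it back to the hardware.
 */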
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
                                  int list_len, u64 iova)
{
        int i, page_mask;

        if (list_len > fmr->attr.max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->attr.page_shift) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < list_len; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->attr.max_maps)
                return -EINVAL;

        return 0;
}

int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        struct mthca_mpt_entry mpt_entry;
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

        for (i = 0; i < list_len; ++i) {
                __be64 mtt_entry = cpu_to_be64(page_list[i] |
                                               MTHCA_MTT_FLAG_PRESENT);
                mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
        }

        mpt_entry.lkey   = cpu_to_be32(key);
        mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        mpt_entry.start  = cpu_to_be64(iova);

        __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
        memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
                    offsetof(struct mthca_mpt_entry, window_count) -
                    offsetof(struct mthca_mpt_entry, start));

        writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

        return 0;
}

int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                key += SINAI_FMR_KEY_INC;
        else
                key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

        wmb();

        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                list_len * sizeof(u64), DMA_TO_DEVICE);

        for (i = 0; i < list_len; ++i)
                fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                     MTHCA_MTT_FLAG_PRESENT);

        dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
                                   list_len * sizeof(u64), DMA_TO_DEVICE);

        fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
        fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
        fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
        fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

        wmb();

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

        wmb();

        return 0;
}

void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (!fmr->maps)
                return;

        fmr->maps = 0;

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
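/*
 * Set up the MR tables at driver init: the MPT allocator and the main
 * MTT buddy allocator, plus, when FMR MTTs are reserved, a separate
 * buddy allocator for them, and on Tavor-mode FMR devices ioremapped
 * windows onto the MPT and MTT tables for direct access.
 */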
int mthca_init_mr_table(struct mthca_dev *dev)
{
        phys_addr_t addr;
        int mpts, mtts, err, i;

        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
                               dev->limits.num_mpts,
                               ~0, dev->limits.reserved_mrws);
        if (err)
                return err;

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
                dev->limits.fmr_reserved_mtts = 0;
        else
                dev->mthca_flags |= MTHCA_FLAG_FMR;

        if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                mthca_dbg(dev, "Memory key throughput optimization activated.\n");

        err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
                               fls(dev->limits.num_mtt_segs - 1));
        if (err)
                goto err_mtt_buddy;

        dev->mr_table.tavor_fmr.mpt_base = NULL;
        dev->mr_table.tavor_fmr.mtt_base = NULL;

        if (dev->limits.fmr_reserved_mtts) {
                i = fls(dev->limits.fmr_reserved_mtts - 1);

                if (i >= 31) {
                        mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
                        err = -EINVAL;
                        goto err_fmr_mpt;
                }
                mpts = mtts = 1 << i;
        } else {
                mtts = dev->limits.num_mtt_segs;
                mpts = dev->limits.num_mpts;
        }

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_FMR)) {

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mpt_base);

                dev->mr_table.tavor_fmr.mpt_base =
                        ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));

                if (!dev->mr_table.tavor_fmr.mpt_base) {
                        mthca_warn(dev, "MPT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mpt;
                }

                addr = pci_resource_start(dev->pdev, 4) +
                        ((pci_resource_len(dev->pdev, 4) - 1) &
                         dev->mr_table.mtt_base);

                dev->mr_table.tavor_fmr.mtt_base =
                        ioremap(addr, mtts * dev->limits.mtt_seg_size);
                if (!dev->mr_table.tavor_fmr.mtt_base) {
                        mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mtt;
                }
        }

        if (dev->limits.fmr_reserved_mtts) {
                err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_fmr_mtt_buddy;

                /* Prevent regular MRs from using FMR keys */
                err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
                if (err)
                        goto err_reserve_fmr;

                dev->mr_table.fmr_mtt_buddy =
                        &dev->mr_table.tavor_fmr.mtt_buddy;
        } else
                dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

        /* FMR table is always the first, take reserved MTTs out of there */

        if (dev->limits.reserved_mtts) {
                i = fls(dev->limits.reserved_mtts - 1);

                if (mthca_alloc_mtt_range(dev, i,
                                          dev->mr_table.fmr_mtt_buddy) == -1) {
                        mthca_warn(dev, "MTT table of order %d is too small.\n",
                                   dev->mr_table.fmr_mtt_buddy->max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
err_reserve_fmr:
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

        return err;
}

void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
        /* XXX check if any MRs are still allocated? */
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}