/*
 * Copyright © 2006-2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 */

#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>

void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	unsigned long start_pfn, unsigned long pfn_32bit)
{
	/*
	 * IOVA granularity will normally be equal to the smallest
	 * supported IOMMU page size; both *must* be capable of
	 * representing individual CPU pages exactly.
	 */
	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));

	spin_lock_init(&iovad->iova_rbtree_lock);
	iovad->rbroot = RB_ROOT;
	iovad->cached32_node = NULL;
	iovad->granule = granule;
	iovad->start_pfn = start_pfn;
	iovad->dma_32bit_pfn = pfn_32bit;
}
EXPORT_SYMBOL_GPL(init_iova_domain);
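
/*
 * Illustrative sketch of typical domain setup (not mandated by this file):
 * the values below are assumptions in the style of existing IOMMU drivers,
 * using a 4K IOMMU page size, a start pfn of 1 so that pfn 0 is never handed
 * out, and the last 4K pfn below 4 GiB as the 32-bit boundary:
 *
 *	struct iova_domain iovad;
 *
 *	init_iova_domain(&iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> 12);
 *
 * The matching teardown, once every iova has been freed, is
 * put_iova_domain(&iovad).
 */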

static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
{
	if ((*limit_pfn != iovad->dma_32bit_pfn) ||
		(iovad->cached32_node == NULL))
		return rb_last(&iovad->rbroot);
	else {
		struct rb_node *prev_node = rb_prev(iovad->cached32_node);
		struct iova *curr_iova =
			container_of(iovad->cached32_node, struct iova, node);
		*limit_pfn = curr_iova->pfn_lo - 1;
		return prev_node;
	}
}

static void
__cached_rbnode_insert_update(struct iova_domain *iovad,
	unsigned long limit_pfn, struct iova *new)
{
	if (limit_pfn != iovad->dma_32bit_pfn)
		return;
	iovad->cached32_node = &new->node;
}

static void
__cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
{
	struct iova *cached_iova;
	struct rb_node *curr;

	if (!iovad->cached32_node)
		return;
	curr = iovad->cached32_node;
	cached_iova = container_of(curr, struct iova, node);

	if (free->pfn_lo >= cached_iova->pfn_lo) {
		struct rb_node *node = rb_next(&free->node);
		struct iova *iova = container_of(node, struct iova, node);

		/* only cache if it's below 32bit pfn */
		if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
			iovad->cached32_node = node;
		else
			iovad->cached32_node = NULL;
	}
}

/*
 * Computes the padding size required, to make the start address
 * naturally aligned on the power-of-two order of its size
 */
static unsigned int
iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
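
/*
 * Worked example of the padding computation above (the numbers are chosen
 * purely for illustration): for size = 5 and limit_pfn = 99,
 * __roundup_pow_of_two(5) is 8, so pad_size = (99 + 1 - 5) & 7 = 7. The
 * allocator below then picks pfn_lo = limit_pfn - (size + pad_size) + 1 = 88,
 * which is aligned on the rounded-up size of 8, and pfn_hi = 92 stays within
 * limit_pfn.
 */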

static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
		unsigned long size, unsigned long limit_pfn,
			struct iova *new, bool size_aligned)
{
	struct rb_node *prev, *curr = NULL;
	unsigned long flags;
	unsigned long saved_pfn;
	unsigned int pad_size = 0;

	/* Walk the tree backwards */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	saved_pfn = limit_pfn;
	curr = __get_cached_rbnode(iovad, &limit_pfn);
	prev = curr;
	while (curr) {
		struct iova *curr_iova = container_of(curr, struct iova, node);

		if (limit_pfn < curr_iova->pfn_lo)
			goto move_left;
		else if (limit_pfn < curr_iova->pfn_hi)
			goto adjust_limit_pfn;
		else {
			if (size_aligned)
				pad_size = iova_get_pad_size(size, limit_pfn);
			if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn)
				break;	/* found a free slot */
		}
adjust_limit_pfn:
		limit_pfn = curr_iova->pfn_lo - 1;
move_left:
		prev = curr;
		curr = rb_prev(curr);
	}

	if (!curr) {
		if (size_aligned)
			pad_size = iova_get_pad_size(size, limit_pfn);
		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			return -ENOMEM;
		}
	}

	/* pfn_lo will point to size aligned address if size_aligned is set */
	new->pfn_lo = limit_pfn - (size + pad_size) + 1;
	new->pfn_hi = new->pfn_lo + size - 1;

	/* Insert the new_iova into domain rbtree by holding writer lock */
	/* Add new node and rebalance tree. */
	{
		struct rb_node **entry, *parent = NULL;

		/* If we have 'prev', it's a valid place to start the
		   insertion. Otherwise, start from the root. */
		if (prev)
			entry = &prev;
		else
			entry = &iovad->rbroot.rb_node;

		/* Figure out where to put new node */
		while (*entry) {
			struct iova *this = container_of(*entry,
							struct iova, node);
			parent = *entry;

			if (new->pfn_lo < this->pfn_lo)
				entry = &((*entry)->rb_left);
			else if (new->pfn_lo > this->pfn_lo)
				entry = &((*entry)->rb_right);
			else
				BUG(); /* this should not happen */
		}

		/* Add new node and rebalance tree. */
		rb_link_node(&new->node, parent, entry);
		rb_insert_color(&new->node, &iovad->rbroot);
	}
	__cached_rbnode_insert_update(iovad, saved_pfn, new);

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return 0;
}

static void
iova_insert_rbtree(struct rb_root *root, struct iova *iova)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	/* Figure out where to put new node */
	while (*new) {
		struct iova *this = container_of(*new, struct iova, node);

		parent = *new;

		if (iova->pfn_lo < this->pfn_lo)
			new = &((*new)->rb_left);
		else if (iova->pfn_lo > this->pfn_lo)
			new = &((*new)->rb_right);
		else
			BUG(); /* this should not happen */
	}
	/* Add new node and rebalance tree. */
	rb_link_node(&iova->node, parent, new);
	rb_insert_color(&iova->node, root);
}

static struct kmem_cache *iova_cache;
static unsigned int iova_cache_users;
static DEFINE_MUTEX(iova_cache_mutex);

struct iova *alloc_iova_mem(void)
{
	return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL(alloc_iova_mem);

void free_iova_mem(struct iova *iova)
{
	kmem_cache_free(iova_cache, iova);
}
EXPORT_SYMBOL(free_iova_mem);

int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			mutex_unlock(&iova_cache_mutex);
			printk(KERN_ERR "Couldn't create iova cache\n");
			return -ENOMEM;
		}
	}

	iova_cache_users++;
	mutex_unlock(&iova_cache_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(iova_cache_get);

void iova_cache_put(void)
{
	mutex_lock(&iova_cache_mutex);
	if (WARN_ON(!iova_cache_users)) {
		mutex_unlock(&iova_cache_mutex);
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
		kmem_cache_destroy(iova_cache);
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
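
/*
 * Illustrative sketch of how a user of this library is expected to hold a
 * reference on the shared iova kmem_cache for as long as it allocates and
 * frees iovas (the surrounding driver code is hypothetical):
 *
 *	err = iova_cache_get();
 *	if (err)
 *		return err;
 *	... init_iova_domain(), alloc_iova(), free_iova(), ...
 *	put_iova_domain(&iovad);
 *	iova_cache_put();
 */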

/**
 * alloc_iova - allocates an iova
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @size_aligned: - set if size_aligned address range is required
 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
 * flag is set then the allocated address iova->pfn_lo will be naturally
 * aligned on roundup_power_of_two(size).
 */
struct iova *
alloc_iova(struct iova_domain *iovad, unsigned long size,
	unsigned long limit_pfn,
	bool size_aligned)
{
	struct iova *new_iova;
	int ret;

	new_iova = alloc_iova_mem();
	if (!new_iova)
		return NULL;

	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
			new_iova, size_aligned);

	if (ret) {
		free_iova_mem(new_iova);
		return NULL;
	}

	return new_iova;
}
EXPORT_SYMBOL_GPL(alloc_iova);
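
/*
 * Illustrative sketch of an allocation sized in granule units and limited to
 * 32-bit addresses; "nrpages" and "iovad" are hypothetical names, and the
 * iova_shift()/iova_dma_addr()/iova_pfn() helpers are the ones declared in
 * <linux/iova.h>:
 *
 *	struct iova *iova;
 *	dma_addr_t dma;
 *
 *	iova = alloc_iova(&iovad, nrpages,
 *			  DMA_BIT_MASK(32) >> iova_shift(&iovad), true);
 *	if (!iova)
 *		return -ENOMEM;
 *	dma = iova_dma_addr(&iovad, iova);
 *	...
 *	free_iova(&iovad, iova_pfn(&iovad, dma));
 */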

/**
 * find_iova - finds an iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - page frame number
 * This function finds and returns an iova belonging to the
 * given domain which matches the given pfn.
 */
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
	unsigned long flags;
	struct rb_node *node;

	/* Take the lock so that no other thread is manipulating the rbtree */
	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = iovad->rbroot.rb_node;
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		/* If pfn falls within iova's range, return iova */
		if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
			/* We are not holding the lock while this iova
			 * is referenced by the caller as the same thread
			 * which called this function also calls __free_iova()
			 * and it is by design that only one thread can possibly
			 * reference a particular iova and hence no conflict.
			 */
			return iova;
		}

		if (pfn < iova->pfn_lo)
			node = node->rb_left;
		else if (pfn > iova->pfn_lo)
			node = node->rb_right;
	}

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_iova);

/**
 * __free_iova - frees the given iova
 * @iovad: iova domain in question.
 * @iova: iova in question.
 * Frees the given iova belonging to the given domain
 */
void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);

/**
 * free_iova - finds and frees the iova for a given pfn
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * This function finds an iova for a given pfn and then
 * frees the iova from that domain.
 */
void
free_iova(struct iova_domain *iovad, unsigned long pfn)
{
	struct iova *iova = find_iova(iovad, pfn);

	if (iova)
		__free_iova(iovad, iova);
}
EXPORT_SYMBOL_GPL(free_iova);

/**
 * put_iova_domain - destroys the iova domain
 * @iovad: - iova domain in question.
 * All the iovas in that domain are destroyed.
 */
void put_iova_domain(struct iova_domain *iovad)
{
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	node = rb_first(&iovad->rbroot);
	while (node) {
		struct iova *iova = container_of(node, struct iova, node);

		rb_erase(node, &iovad->rbroot);
		free_iova_mem(iova);
		node = rb_first(&iovad->rbroot);
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(put_iova_domain);

static int
__is_range_overlap(struct rb_node *node,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova = container_of(node, struct iova, node);

	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
		return 1;
	return 0;
}

static inline struct iova *
alloc_and_init_iova(unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_iova_mem();
	if (iova) {
		iova->pfn_lo = pfn_lo;
		iova->pfn_hi = pfn_hi;
	}

	return iova;
}

static struct iova *
__insert_new_range(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova;

	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
	if (iova)
		iova_insert_rbtree(&iovad->rbroot, iova);

	return iova;
}

static void
__adjust_overlap_range(struct iova *iova,
	unsigned long *pfn_lo, unsigned long *pfn_hi)
{
	if (*pfn_lo < iova->pfn_lo)
		iova->pfn_lo = *pfn_lo;
	if (*pfn_hi > iova->pfn_hi)
		*pfn_lo = iova->pfn_hi + 1;
}
/**
 * reserve_iova - reserves an iova in the given range
 * @iovad: - iova domain pointer
 * @pfn_lo: - lower page frame address
 * @pfn_hi: - higher pfn address
 * This function reserves the address range from pfn_lo to pfn_hi so
 * that this address is not dished out as part of alloc_iova.
 */
struct iova *
reserve_iova(struct iova_domain *iovad,
	unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct rb_node *node;
	unsigned long flags;
	struct iova *iova;
	unsigned int overlap = 0;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
			iova = container_of(node, struct iova, node);
			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
			if ((pfn_lo >= iova->pfn_lo) &&
				(pfn_hi <= iova->pfn_hi))
				goto finish;
			overlap = 1;

		} else if (overlap)
			break;
	}

	/* We are here either because this is the first reserved node
	 * or we need to insert the remaining non-overlapping address range
	 */
	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
finish:

	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	return iova;
}
EXPORT_SYMBOL_GPL(reserve_iova);
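
/*
 * Illustrative sketch: carving a fixed window out of a domain so that
 * alloc_iova() never hands it out, e.g. the x86 IOAPIC/MSI region at
 * 0xfee00000-0xfeefffff expressed as 4K pfns (the choice of range is an
 * assumption for the example, not something this code requires):
 *
 *	reserve_iova(&iovad, 0xfee00, 0xfeeff);
 */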

/**
 * copy_reserved_iova - copies the reserved iovas between domains
 * @from: - source domain from where to copy
 * @to: - destination domain where to copy
 * This function copies reserved iovas from one domain to
 * another.
 */
void
copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
{
	unsigned long flags;
	struct rb_node *node;

	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
		struct iova *iova = container_of(node, struct iova, node);
		struct iova *new_iova;

		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
		if (!new_iova)
			printk(KERN_ERR "Reserve iova range %lx-%lx failed\n",
				iova->pfn_lo, iova->pfn_hi);
	}
	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
EXPORT_SYMBOL_GPL(copy_reserved_iova);

struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
		      unsigned long pfn_lo, unsigned long pfn_hi)
{
	unsigned long flags;
	struct iova *prev = NULL, *next = NULL;

	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
	if (iova->pfn_lo < pfn_lo) {
		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
		if (prev == NULL)
			goto error;
	}
	if (iova->pfn_hi > pfn_hi) {
		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
		if (next == NULL)
			goto error;
	}

	__cached_rbnode_delete_update(iovad, iova);
	rb_erase(&iova->node, &iovad->rbroot);

	if (prev) {
		iova_insert_rbtree(&iovad->rbroot, prev);
		iova->pfn_lo = pfn_lo;
	}
	if (next) {
		iova_insert_rbtree(&iovad->rbroot, next);
		iova->pfn_hi = pfn_hi;
	}
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);

	return iova;

error:
	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
	if (prev)
		free_iova_mem(prev);
	return NULL;
}
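
/*
 * Illustrative sketch of split_and_remove_iova(): the [keep_lo, keep_hi]
 * portion is carved out of an existing reservation and handed back to the
 * caller already removed from the rbtree, while the pieces on either side
 * stay reserved; the caller releases the returned piece with free_iova_mem().
 * The names below are hypothetical:
 *
 *	struct iova *middle;
 *
 *	middle = split_and_remove_iova(&iovad, old_iova, keep_lo, keep_hi);
 *	if (!middle)
 *		return -ENOMEM;
 *	free_iova_mem(middle);
 */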

MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");