2008-01-24 16:13:08 -05:00
# include <linux/err.h>
2007-08-27 16:49:44 -04:00
# include <linux/gfp.h>
2008-01-24 16:13:08 -05:00
# include <linux/slab.h>
2007-08-27 16:49:44 -04:00
# include <linux/module.h>
# include <linux/spinlock.h>
2007-10-19 09:23:05 -04:00
# include <linux/version.h>
2008-01-24 16:13:08 -05:00
# include <linux/hardirq.h>
2007-08-27 16:49:44 -04:00
# include "extent_map.h"
2007-09-10 19:58:16 -04:00
/* temporary define until extent_map moves out of btrfs */
struct kmem_cache * btrfs_cache_create ( const char * name , size_t size ,
unsigned long extra_flags ,
void ( * ctor ) ( void * , struct kmem_cache * ,
unsigned long ) ) ;
2007-08-27 16:49:44 -04:00
static struct kmem_cache * extent_map_cache ;
2007-11-27 11:16:35 -05:00
2007-11-19 10:22:33 -05:00
int __init extent_map_init ( void )
2007-08-27 16:49:44 -04:00
{
2007-09-10 19:58:16 -04:00
extent_map_cache = btrfs_cache_create ( " extent_map " ,
2007-10-15 16:14:37 -04:00
sizeof ( struct extent_map ) , 0 ,
2007-08-27 16:49:44 -04:00
NULL ) ;
2007-11-19 10:22:33 -05:00
if ( ! extent_map_cache )
return - ENOMEM ;
return 0 ;
2007-08-27 16:49:44 -04:00
}
2007-12-11 09:25:06 -05:00
void extent_map_exit ( void )
2007-08-27 16:49:44 -04:00
{
if ( extent_map_cache )
kmem_cache_destroy ( extent_map_cache ) ;
}
2008-06-11 21:52:17 -04:00
/**
* extent_map_tree_init - initialize extent map tree
* @ tree : tree to initialize
* @ mask : flags for memory allocations during tree operations
*
* Initialize the extent tree @ tree . Should be called for each new inode
* or other user of the extent_map interface .
*/
2008-01-24 16:13:08 -05:00
void extent_map_tree_init ( struct extent_map_tree * tree , gfp_t mask )
2007-08-27 16:49:44 -04:00
{
tree - > map . rb_node = NULL ;
2008-01-24 16:13:08 -05:00
spin_lock_init ( & tree - > lock ) ;
2007-08-27 16:49:44 -04:00
}
EXPORT_SYMBOL ( extent_map_tree_init ) ;
2008-06-11 21:52:17 -04:00
/**
* alloc_extent_map - allocate new extent map structure
* @ mask : memory allocation flags
*
* Allocate a new extent_map structure . The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map ( )
*/
2007-08-27 16:49:44 -04:00
struct extent_map * alloc_extent_map ( gfp_t mask )
{
struct extent_map * em ;
em = kmem_cache_alloc ( extent_map_cache , mask ) ;
if ( ! em | | IS_ERR ( em ) )
return em ;
em - > in_tree = 0 ;
2008-01-24 16:13:08 -05:00
em - > flags = 0 ;
2007-08-27 16:49:44 -04:00
atomic_set ( & em - > refs , 1 ) ;
return em ;
}
EXPORT_SYMBOL ( alloc_extent_map ) ;
2008-06-11 21:52:17 -04:00
/**
* free_extent_map - drop reference count of an extent_map
* @ em : extent map beeing releasead
*
* Drops the reference out on @ em by one and free the structure
* if the reference count hits zero .
*/
2007-08-27 16:49:44 -04:00
void free_extent_map ( struct extent_map * em )
{
2007-08-30 11:54:02 -04:00
if ( ! em )
return ;
2008-01-24 16:13:08 -05:00
WARN_ON ( atomic_read ( & em - > refs ) = = 0 ) ;
2007-08-27 16:49:44 -04:00
if ( atomic_dec_and_test ( & em - > refs ) ) {
WARN_ON ( em - > in_tree ) ;
kmem_cache_free ( extent_map_cache , em ) ;
}
}
EXPORT_SYMBOL ( free_extent_map ) ;
/*
 * tree_insert - link @node into @root keyed by byte @offset.
 *
 * Standard rbtree insertion.  Returns NULL on success, or the existing
 * node whose [start, extent_map_end) range already covers @offset
 * (in which case @node is not inserted).
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;
	struct extent_map *em;

	while (*link) {
		parent = *link;
		em = rb_entry(parent, struct extent_map, rb_node);
		WARN_ON(!em->in_tree);
		if (offset < em->start)
			link = &(*link)->rb_left;
		else if (offset >= extent_map_end(em))
			link = &(*link)->rb_right;
		else
			return parent;	/* overlap: report colliding node */
	}

	em = rb_entry(node, struct extent_map, rb_node);
	em->in_tree = 1;
	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * __tree_search - find the node covering @offset, or its neighbours.
 *
 * Returns the node whose range contains @offset, or NULL if there is no
 * exact hit.  On a miss, *@prev_ret (if non-NULL) is set to the first
 * node ending beyond @offset, and *@next_ret (if non-NULL) to the last
 * node starting at or before @offset; either may be set to NULL when no
 * such neighbour exists.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret,
				     struct rb_node **next_ret)
{
	struct rb_node *node = root->rb_node;
	struct rb_node *last = NULL;
	struct rb_node *saved = NULL;
	struct extent_map *cur;
	struct extent_map *last_entry = NULL;

	/* rbtree descent; remember the last node visited on the way down */
	while (node) {
		cur = rb_entry(node, struct extent_map, rb_node);
		last = node;
		last_entry = cur;
		WARN_ON(!cur->in_tree);
		if (offset < cur->start)
			node = node->rb_left;
		else if (offset >= extent_map_end(cur))
			node = node->rb_right;
		else
			return node;
	}

	if (prev_ret) {
		saved = last;
		/* walk forward until the entry ends past @offset */
		while (last && offset >= extent_map_end(last_entry)) {
			last = rb_next(last);
			last_entry = rb_entry(last, struct extent_map,
					      rb_node);
		}
		*prev_ret = last;
		last = saved;
	}
	if (next_ret) {
		last_entry = rb_entry(last, struct extent_map, rb_node);
		/* walk backward until the entry starts at or before @offset */
		while (last && offset < last_entry->start) {
			last = rb_prev(last);
			last_entry = rb_entry(last, struct extent_map,
					      rb_node);
		}
		*next_ret = last;
	}
	return NULL;
}
/*
 * tree_search - exact-match lookup with a "closest after" fallback.
 *
 * Returns the node covering @offset if one exists, otherwise the node
 * __tree_search() reported via its prev pointer (may be NULL).
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *found;

	found = __tree_search(root, offset, &prev, NULL);
	return found ? found : prev;
}
2008-01-24 16:13:08 -05:00
static int mergable_maps ( struct extent_map * prev , struct extent_map * next )
2007-08-27 16:49:44 -04:00
{
2008-07-18 12:01:11 -04:00
if ( test_bit ( EXTENT_FLAG_PINNED , & prev - > flags ) )
return 0 ;
2008-01-24 16:13:08 -05:00
if ( extent_map_end ( prev ) = = next - > start & &
prev - > flags = = next - > flags & &
prev - > bdev = = next - > bdev & &
( ( next - > block_start = = EXTENT_MAP_HOLE & &
prev - > block_start = = EXTENT_MAP_HOLE ) | |
( next - > block_start = = EXTENT_MAP_INLINE & &
prev - > block_start = = EXTENT_MAP_INLINE ) | |
( next - > block_start = = EXTENT_MAP_DELALLOC & &
prev - > block_start = = EXTENT_MAP_DELALLOC ) | |
( next - > block_start < EXTENT_MAP_LAST_BYTE - 1 & &
next - > block_start = = extent_map_block_end ( prev ) ) ) ) {
return 1 ;
}
2007-08-27 16:49:44 -04:00
return 0 ;
}
2008-06-11 21:52:17 -04:00
/**
* add_extent_mapping - add new extent map to the extent tree
* @ tree : tree to insert new map in
* @ em : map to insert
*
* Insert @ em into @ tree or perform a simple forward / backward merge with
* existing mappings . The extent_map struct passed in will be inserted
* into the tree directly , with an additional reference taken , or a
* reference dropped if the merge attempt was sucessfull .
2007-08-27 16:49:44 -04:00
*/
int add_extent_mapping ( struct extent_map_tree * tree ,
struct extent_map * em )
{
int ret = 0 ;
2008-01-24 16:13:08 -05:00
struct extent_map * merge = NULL ;
2007-08-27 16:49:44 -04:00
struct rb_node * rb ;
2008-08-20 08:51:50 -04:00
struct extent_map * exist ;
2007-08-27 16:49:44 -04:00
2008-08-20 08:51:50 -04:00
exist = lookup_extent_mapping ( tree , em - > start , em - > len ) ;
if ( exist ) {
free_extent_map ( exist ) ;
ret = - EEXIST ;
goto out ;
}
2008-07-24 10:09:43 -04:00
assert_spin_locked ( & tree - > lock ) ;
2008-01-24 16:13:08 -05:00
rb = tree_insert ( & tree - > map , em - > start , & em - > rb_node ) ;
2007-08-27 16:49:44 -04:00
if ( rb ) {
ret = - EEXIST ;
2008-07-17 12:53:50 -04:00
free_extent_map ( merge ) ;
2007-08-27 16:49:44 -04:00
goto out ;
}
atomic_inc ( & em - > refs ) ;
if ( em - > start ! = 0 ) {
rb = rb_prev ( & em - > rb_node ) ;
if ( rb )
2008-01-24 16:13:08 -05:00
merge = rb_entry ( rb , struct extent_map , rb_node ) ;
if ( rb & & mergable_maps ( merge , em ) ) {
em - > start = merge - > start ;
em - > len + = merge - > len ;
em - > block_start = merge - > block_start ;
merge - > in_tree = 0 ;
rb_erase ( & merge - > rb_node , & tree - > map ) ;
free_extent_map ( merge ) ;
2007-08-27 16:49:44 -04:00
}
}
2008-01-24 16:13:08 -05:00
rb = rb_next ( & em - > rb_node ) ;
if ( rb )
merge = rb_entry ( rb , struct extent_map , rb_node ) ;
if ( rb & & mergable_maps ( em , merge ) ) {
em - > len + = merge - > len ;
rb_erase ( & merge - > rb_node , & tree - > map ) ;
merge - > in_tree = 0 ;
free_extent_map ( merge ) ;
}
2007-08-27 16:49:44 -04:00
out :
return ret ;
}
EXPORT_SYMBOL ( add_extent_mapping ) ;
2008-01-24 16:13:08 -05:00
static u64 range_end ( u64 start , u64 len )
{
if ( start + len < start )
return ( u64 ) - 1 ;
return start + len ;
}
2008-06-11 21:52:17 -04:00
/**
* lookup_extent_mapping - lookup extent_map
* @ tree : tree to lookup in
* @ start : byte offset to start the search
* @ len : length of the lookup range
*
* Find and return the first extent_map struct in @ tree that intersects the
* [ start , len ] range . There may be additional objects in the tree that
* intersect , so check the object returned carefully to make sure that no
* additional lookups are needed .
2007-08-27 16:49:44 -04:00
*/
struct extent_map * lookup_extent_mapping ( struct extent_map_tree * tree ,
2008-01-24 16:13:08 -05:00
u64 start , u64 len )
2007-08-27 16:49:44 -04:00
{
struct extent_map * em ;
struct rb_node * rb_node ;
2008-06-10 10:21:04 -04:00
struct rb_node * prev = NULL ;
struct rb_node * next = NULL ;
u64 end = range_end ( start , len ) ;
2008-07-24 10:09:43 -04:00
assert_spin_locked ( & tree - > lock ) ;
2008-01-22 16:47:59 -05:00
rb_node = __tree_search ( & tree - > map , start , & prev , & next ) ;
if ( ! rb_node & & prev ) {
em = rb_entry ( prev , struct extent_map , rb_node ) ;
2008-01-24 16:13:08 -05:00
if ( end > em - > start & & start < extent_map_end ( em ) )
2008-01-22 16:47:59 -05:00
goto found ;
}
if ( ! rb_node & & next ) {
em = rb_entry ( next , struct extent_map , rb_node ) ;
2008-01-24 16:13:08 -05:00
if ( end > em - > start & & start < extent_map_end ( em ) )
2008-01-22 16:47:59 -05:00
goto found ;
}
2007-08-27 16:49:44 -04:00
if ( ! rb_node ) {
em = NULL ;
goto out ;
}
if ( IS_ERR ( rb_node ) ) {
em = ERR_PTR ( PTR_ERR ( rb_node ) ) ;
goto out ;
}
em = rb_entry ( rb_node , struct extent_map , rb_node ) ;
2008-01-24 16:13:08 -05:00
if ( end > em - > start & & start < extent_map_end ( em ) )
goto found ;
em = NULL ;
goto out ;
2008-01-22 16:47:59 -05:00
found :
2007-08-27 16:49:44 -04:00
atomic_inc ( & em - > refs ) ;
out :
return em ;
}
EXPORT_SYMBOL ( lookup_extent_mapping ) ;
2008-06-11 21:52:17 -04:00
/**
* remove_extent_mapping - removes an extent_map from the extent tree
* @ tree : extent tree to remove from
* @ em : extent map beeing removed
*
* Removes @ em from @ tree . No reference counts are dropped , and no checks
* are done to see if the range is in use
2007-08-27 16:49:44 -04:00
*/
int remove_extent_mapping ( struct extent_map_tree * tree , struct extent_map * em )
{
2008-01-24 16:13:08 -05:00
int ret = 0 ;
2007-08-27 16:49:44 -04:00
2008-07-18 12:01:11 -04:00
WARN_ON ( test_bit ( EXTENT_FLAG_PINNED , & em - > flags ) ) ;
2008-07-24 10:09:43 -04:00
assert_spin_locked ( & tree - > lock ) ;
2008-01-24 16:13:08 -05:00
rb_erase ( & em - > rb_node , & tree - > map ) ;
em - > in_tree = 0 ;
2007-08-27 16:49:44 -04:00
return ret ;
}
EXPORT_SYMBOL ( remove_extent_mapping ) ;