/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		printk(KERN_ERR
		       "hfs: invalid btree extent records (0 size).\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
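	/*
	 * All header fields are stored big-endian on disk; the header
	 * record sits immediately after the common node descriptor at the
	 * start of node 0.
	 */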
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			printk(KERN_ERR "hfs: invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			printk(KERN_ERR "hfs: invalid catalog btree flag\n");
			goto fail_page;
		}
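
		/*
		 * HFSX volumes may use case-sensitive (binary) key
		 * comparison for the catalog; plain HFS+ catalogs always
		 * fold case, so remember that in the superblock flags.
		 */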
		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			printk(KERN_ERR "hfs: invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		printk(KERN_ERR "hfs: unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		printk(KERN_ERR "hfs: invalid btree flag\n");
		goto fail_page;
	}
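
	/*
	 * The node size must be a power of two so node offsets can be
	 * computed with shifts; pages_per_bnode is the number of
	 * page-cache pages a single node spans.
	 */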
	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	page_cache_release(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;
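
	/*
	 * Tear down the bnode hash; any node whose refcount is still
	 * nonzero at this point indicates a leak, so complain before
	 * freeing it.
	 */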
	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				printk(KERN_CRIT "hfs: node %d:%d "
						"still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;

	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
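
	/*
	 * Copy the in-core tree state back into the on-disk header record;
	 * the page is only marked dirty here, writeback happens later.
	 */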
	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;
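
	/*
	 * Turn the node at idx into a new map node and chain it after
	 * prev by updating prev's on-disk next pointer.
	 */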
	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
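
	/*
	 * Set up the record offset table at the end of the node: record 0
	 * (the allocation map) runs from offset 14, right after the node
	 * descriptor, up to the free-space offset at node_size - 6.
	 * The first map bit (0x8000) marks this map node itself as
	 * allocated.
	 */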
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;
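
	/*
	 * If the map shows no free nodes, grow the underlying btree file
	 * and account for the newly added nodes before scanning the map.
	 */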
	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;
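
	/*
	 * Walk the map record(s) one byte at a time looking for a clear
	 * bit; each bit corresponds to one btree node. When a map node is
	 * exhausted, follow its next pointer, or create a new map node if
	 * the chain ends.
	 */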
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}

void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
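
	/*
	 * Find the map node covering nidx: the header node's map record
	 * covers the first len * 8 nodes, further map nodes are chained
	 * through their next pointers.
	 */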
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		hfs_bnode_put(node);
		if (!i) {
			/* panic */;
			printk(KERN_CRIT "hfs: unable to free bnode %u. "
					"bmap not found!\n",
				node->this);
			return;
		}
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			printk(KERN_CRIT "hfs: invalid bmap found! "
					"(%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
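
	/*
	 * Map bits are used most-significant-bit first, so the lowest node
	 * index within a byte is its top bit; clear the bit for this node.
	 */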
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		printk(KERN_CRIT "hfs: trying to free free bnode "
				"%u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}