/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The clump size calculation below is adapted from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	 Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attributes form a geometric
	 * series.  For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};
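
/*
 * Sanity check of the geometric series above (illustrative, not part of
 * the original sources): 8**(1/5) ~= 1.516, and the 16GB Catalog entry
 * of 32 scaled by that factor gives ~48.5, matching the 32GB entry of
 * 49; likewise 4**(1/5) ~= 1.320 takes the 16GB Attributes entry of 64
 * to ~84.4, matching the table's 84.
 */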

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
				  u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size. And
	 * it must also be a multiple of the node and block size.
	 */
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/* 0.8% */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}
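
/*
 * Worked example (illustrative): a 64GB volume has 2^27 512-byte
 * sectors, so sectors >> 22 == 32 and the loop above leaves i == 6.
 * For the catalog file (column 1) this selects
 * clumptbl[1 + 6 * 3] == 74, i.e. a 74MB clump, which is then rounded
 * down to a multiple of the node and block size.
 */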

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

fail_page:
	page_cache_release(page);
free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
free_tree:
	kfree(tree);
	return NULL;
}
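
/*
 * Usage sketch (illustrative; the real call sites live in the mount
 * path, and the error label below is hypothetical): callers must check
 * for NULL, since hfs_btree_open() does not return ERR_PTR values:
 *
 *	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 *	if (!sbi->ext_tree) {
 *		pr_err("failed to load extents file\n");
 *		goto out;	/* hypothetical error label */
 *	}
 */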

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}
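
/*
 * A minimal teardown sketch (illustrative): the unmount path closes
 * whatever trees were opened; hfs_btree_close() tolerates NULL, so
 * unconditional calls are safe:
 *
 *	hfs_btree_close(sbi->attr_tree);
 *	hfs_btree_close(sbi->cat_tree);
 *	hfs_btree_close(sbi->ext_tree);
 */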

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;

	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}
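
/*
 * Layout sketch of the empty map node that hfs_bmap_new_bmap() below
 * constructs, derived from the writes in its body (offsets in bytes):
 *
 *	0 .. 13			bnode descriptor, type HFS_NODE_MAP
 *	14 .. node_size-7	map record; its first u16 is 0x8000, i.e.
 *				bit 0 set to mark the map node itself as
 *				allocated
 *	node_size-4		u16 offset of the free space (node_size-6)
 *	node_size-2		u16 offset of record 0 (14)
 */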

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return ERR_PTR(res);
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}
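
/*
 * Worked example of the bitmap arithmetic shared by hfs_bmap_alloc()
 * above and hfs_bmap_free() below (illustrative): bits are stored
 * MSB-first, so node nidx lives in byte nidx / 8 under the mask
 * 1 << (~nidx & 7).  For nidx == 10 that is byte 1, mask 0x20; a byte
 * of 0xff means all eight nodes it covers are allocated, which is why
 * the allocator skips such bytes wholesale.
 */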

void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		hfs_bnode_put(node);
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			return;
		}
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}