/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_root,
				xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
				xfs_da_state_blk_t *existing_blk,
				xfs_da_state_blk_t *split_blk,
				xfs_da_state_blk_t *blk_to_add,
				int treelevel,
				int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *node_blk_1,
				xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
				xfs_da_state_blk_t *old_node_blk,
				xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
				xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				xfs_da_state_blk_t *src_node_blk,
				xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk,
				xfs_da_state_blk_t *save_blk);

kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
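
/*
 * Verify the contents of a da btree node block: check the magic number
 * (v2 or v3), and on CRC-enabled filesystems the UUID and block number in
 * the v3 header, then sanity check the tree level and entry count.
 */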
static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_node_ents &&
	    ichdr.count > mp->m_attr_node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}
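
/*
 * Write verifier: reject the buffer if it fails verification. On CRC-enabled
 * filesystems, stamp the LSN from the buffer log item (if attached) into the
 * header and recalculate the CRC before the block is written.
 */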
static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_buf_ioerror(bp, EFSBADCRC);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp)) {
			xfs_buf_ioerror(bp, EFSCORRUPTED);
			break;
		}
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}
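
/* Buffer operations for da btree node blocks. */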
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};
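
/*
 * Read a da btree node block using the node buffer ops. If the read is part
 * of a transaction, tag the buffer with a log item type matching the magic
 * number found in the block.
 */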
int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return (error);
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return (0);
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return (error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return (error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return (error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return (0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return (error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return (0);
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr nodehdr;

		dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
		level = nodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);

	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return (error);	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return (error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return (error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}
	return (0);
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));
	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[state->path.active - 1];
	save_blk = &state->altpath.blk[state->path.active - 1];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return (error);
			if (action == 0)
				return (0);
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return (error);
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return (error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return (error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return (error);
}
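
/*
 * Debug-only sanity check: the sole remaining child of a root must carry a
 * leaf magic number at level 1 or a node magic number above that, and must
 * have no sibling pointers.
 */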
#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return (error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[state->path.active - 1];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return (0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return (error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return (0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->node_ents;
	count -= state->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;

		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return (error);

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active - 1;
	blk = &path->blk[level];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
* Routines used for finding things in the Btree .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
* Walk down the Btree looking for a particular filename , filling
* in the state structure as we go .
*
* We will set the state structure to point to each of the elements
* in each of the nodes where either the hashval is or should be .
*
* We support duplicate hashval ' s so for each entry in the current
* node that could contain the desired hashval , descend . This is a
* pruned depth - first tree search .
*/
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK) ? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
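
/*
 * Illustrative sketch (not part of the original source): the descent above
 * does a coarse binary search and then scans backwards so that, when a node
 * contains duplicate hash values, the first matching entry is chosen.  The
 * hypothetical helper below shows the same probe logic on a plain array of
 * hash values; it is for illustration only and is not used by this file.
 */
static inline int
xfs_example_first_matching_probe(
	const xfs_dahash_t	*hashvals,	/* sorted, ascending */
	int			max,		/* number of entries */
	xfs_dahash_t		want)
{
	int	probe;
	int	span;

	probe = span = max / 2;
	while (span > 4) {
		span /= 2;
		if (hashvals[probe] < want)
			probe += span;
		else if (hashvals[probe] > want)
			probe -= span;
		else
			break;
	}
	/* back up to the first duplicate, then step over smaller values */
	while (probe > 0 && hashvals[probe] >= want)
		probe--;
	while (probe < max && hashvals[probe] < want)
		probe++;
	return probe;		/* probe == max means "use the last entry" */
}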

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode	*dp,
	struct xfs_buf		*node1_bp,
	struct xfs_buf		*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
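
/*
 * Illustrative sketch (not part of the original source): xfs_da3_blk_link()
 * is an ordinary doubly linked list insertion, with the before/after choice
 * driven by hash ordering.  The hypothetical in-memory structure below shows
 * the same pointer updates; on disk the links are block numbers and every
 * modified block must also be logged to the transaction.
 */
struct xfs_example_dablk {
	struct xfs_example_dablk *forw;		/* next (higher hashvals) */
	struct xfs_example_dablk *back;		/* previous (lower hashvals) */
};

static inline void
xfs_example_link_after(
	struct xfs_example_dablk *old_blk,
	struct xfs_example_dablk *new_blk)
{
	new_blk->forw = old_blk->forw;
	new_blk->back = old_blk;
	if (old_blk->forw)
		old_blk->forw->back = new_blk;
	old_blk->forw = new_blk;
}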

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return error;
		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
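
/*
 * Illustrative sketch (not part of the original source): the unrolled loop
 * above is equivalent to rotating the running hash left by 7 bits and XORing
 * in one character at a time.  The hypothetical helper below spells that out;
 * it is for illustration only and is not called anywhere in this file.
 */
static inline xfs_dahash_t
xfs_example_hashname_bytewise(const __uint8_t *name, int namelen)
{
	xfs_dahash_t	hash = 0;

	while (namelen--)
		hash = *name++ ^ rol32(hash, 7);
	return hash;
}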

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}

/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
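
/*
 * Illustrative sketch (not part of the original source): the swap above is
 * the classic "move the last element into the hole" trick, so only the tail
 * of the file ever has to be freed.  The hypothetical helper below shows the
 * same idea for removing an entry from an unordered in-memory array.
 */
static inline int
xfs_example_remove_by_swap(
	int	*vals,
	int	count,
	int	victim)		/* index of the entry being removed */
{
	vals[victim] = vals[count - 1];	/* last entry takes the victim's slot */
	return count - 1;		/* new count; only the tail is freed */
}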

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w) | XFS_BMAPI_METADATA,
				    0, args->firstblock, args->flist, &done);
		if (error == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a
 * pointer to a valid xfs_buf_map.  For the multiple map case, this function
 * will allocate the xfs_buf_map to hold all the maps and replace the caller's
 * single map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}

/*
 * Map the block we are given ready for reading. There are three possible
 * return values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				   mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

	/*
	 * This verification code will be moved to a CRC verification callback
	 * function so just leave it here unchanged until then.
	 */
	{
		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
		xfs_dir2_free_t		*free = bp->b_addr;
		xfs_da_blkinfo_t	*info = bp->b_addr;
		uint			magic, magic1;
		struct xfs_mount	*mp = dp->i_mount;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_DA3_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_ATTR3_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR3_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic != XFS_DIR3_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR3_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (magic1 != XFS_DIR3_DATA_MAGIC) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
				   (free->hdr.magic !=
					cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_trans_brelse(trans, bp);
			goto out_free;
		}
	}
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	if (error)
		return -1;
	return mappedbno;
}