2018-06-05 19:42:14 -07:00
// SPDX-License-Identifier: GPL-2.0
2005-04-16 15:20:36 -07:00
/*
2005-11-02 14:58:39 +11:00
* Copyright ( c ) 2000 - 2005 Silicon Graphics , Inc .
2013-04-24 18:58:02 +10:00
* Copyright ( c ) 2013 Red Hat , Inc .
2005-11-02 14:58:39 +11:00
* All Rights Reserved .
2005-04-16 15:20:36 -07:00
*/
# include "xfs.h"
2005-11-02 14:38:42 +11:00
# include "xfs_fs.h"
2013-10-23 10:36:05 +11:00
# include "xfs_shared.h"
2013-10-23 10:50:10 +11:00
# include "xfs_format.h"
# include "xfs_log_format.h"
# include "xfs_trans_resv.h"
2005-11-02 14:38:42 +11:00
# include "xfs_bit.h"
2005-04-16 15:20:36 -07:00
# include "xfs_mount.h"
2019-11-11 12:59:26 -08:00
# include "xfs_inode.h"
2013-08-12 20:49:37 +10:00
# include "xfs_dir2.h"
2011-07-13 13:43:48 +02:00
# include "xfs_dir2_priv.h"
2013-10-23 10:50:10 +11:00
# include "xfs_trans.h"
2005-04-16 15:20:36 -07:00
# include "xfs_bmap.h"
# include "xfs_attr_leaf.h"
# include "xfs_error.h"
2009-12-14 23:14:59 +00:00
# include "xfs_trace.h"
2013-04-24 18:58:02 +10:00
# include "xfs_buf_item.h"
2015-10-12 15:59:25 +11:00
# include "xfs_log.h"
2005-04-16 15:20:36 -07:00
/*
* xfs_da_btree . c
*
* Routines to implement directories as Btrees of hashed names .
*/
/*========================================================================
* Function prototypes for the kernel .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
* Routines used for growing the Btree .
*/
2013-04-24 18:58:02 +10:00
STATIC int xfs_da3_root_split ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * existing_root ,
xfs_da_state_blk_t * new_child ) ;
2013-04-24 18:58:02 +10:00
STATIC int xfs_da3_node_split ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * existing_blk ,
xfs_da_state_blk_t * split_blk ,
xfs_da_state_blk_t * blk_to_add ,
int treelevel ,
int * result ) ;
2013-04-24 18:58:02 +10:00
STATIC void xfs_da3_node_rebalance ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * node_blk_1 ,
xfs_da_state_blk_t * node_blk_2 ) ;
2013-04-24 18:58:02 +10:00
STATIC void xfs_da3_node_add ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * old_node_blk ,
xfs_da_state_blk_t * new_node_blk ) ;
/*
* Routines used for shrinking the Btree .
*/
2013-04-24 18:58:02 +10:00
STATIC int xfs_da3_root_join ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * root_blk ) ;
2013-04-24 18:58:02 +10:00
STATIC int xfs_da3_node_toosmall ( xfs_da_state_t * state , int * retval ) ;
STATIC void xfs_da3_node_remove ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * drop_blk ) ;
2013-04-24 18:58:02 +10:00
STATIC void xfs_da3_node_unbalance ( xfs_da_state_t * state ,
2005-04-16 15:20:36 -07:00
xfs_da_state_blk_t * src_node_blk ,
xfs_da_state_blk_t * dst_node_blk ) ;
/*
* Utility routines .
*/
2013-04-24 18:58:02 +10:00
STATIC int xfs_da3_blk_unlink ( xfs_da_state_t * state ,
2005-06-21 15:36:52 +10:00
xfs_da_state_blk_t * drop_blk ,
xfs_da_state_blk_t * save_blk ) ;
2005-04-16 15:20:36 -07:00
2013-04-24 18:58:02 +10:00
2021-09-27 14:26:19 -07:00
struct kmem_cache * xfs_da_state_zone ; /* anchor for state struct zone */
2013-04-24 18:58:02 +10:00
/*
* Allocate a dir - state structure .
* We don ' t put them on the stack since they ' re large .
*/
2020-07-22 09:23:18 -07:00
struct xfs_da_state *
xfs_da_state_alloc (
struct xfs_da_args * args )
2013-04-24 18:58:02 +10:00
{
2020-07-22 09:23:18 -07:00
struct xfs_da_state * state ;
state = kmem_cache_zalloc ( xfs_da_state_zone , GFP_NOFS | __GFP_NOFAIL ) ;
state - > args = args ;
state - > mp = args - > dp - > i_mount ;
return state ;
2013-04-24 18:58:02 +10:00
}
/*
* Kill the altpath contents of a da - state structure .
*/
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	blk_idx = 0;

	/* Drop the buffer pointers held by the alternate path. */
	while (blk_idx < state->altpath.active) {
		state->altpath.blk[blk_idx].bp = NULL;
		blk_idx++;
	}
	state->altpath.active = 0;
}
/*
* Free a da - state structure .
*/
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	/* Poison the structure so stale users trip quickly in debug builds. */
	memset(state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_cache_free(xfs_da_state_zone, state);
}
2019-11-20 09:46:00 -08:00
static inline int xfs_dabuf_nfsb ( struct xfs_mount * mp , int whichfork )
{
if ( whichfork = = XFS_DATA_FORK )
return mp - > m_dir_geo - > fsbcount ;
return mp - > m_attr_geo - > fsbcount ;
}
2019-11-08 14:53:00 -08:00
void
xfs_da3_node_hdr_from_disk (
struct xfs_mount * mp ,
struct xfs_da3_icnode_hdr * to ,
struct xfs_da_intnode * from )
{
2021-08-18 18:46:55 -07:00
if ( xfs_has_crc ( mp ) ) {
2019-11-08 14:53:00 -08:00
struct xfs_da3_intnode * from3 = ( struct xfs_da3_intnode * ) from ;
to - > forw = be32_to_cpu ( from3 - > hdr . info . hdr . forw ) ;
to - > back = be32_to_cpu ( from3 - > hdr . info . hdr . back ) ;
to - > magic = be16_to_cpu ( from3 - > hdr . info . hdr . magic ) ;
to - > count = be16_to_cpu ( from3 - > hdr . __count ) ;
to - > level = be16_to_cpu ( from3 - > hdr . __level ) ;
2019-11-08 14:57:48 -08:00
to - > btree = from3 - > __btree ;
2019-11-08 14:53:00 -08:00
ASSERT ( to - > magic = = XFS_DA3_NODE_MAGIC ) ;
} else {
to - > forw = be32_to_cpu ( from - > hdr . info . forw ) ;
to - > back = be32_to_cpu ( from - > hdr . info . back ) ;
to - > magic = be16_to_cpu ( from - > hdr . info . magic ) ;
to - > count = be16_to_cpu ( from - > hdr . __count ) ;
to - > level = be16_to_cpu ( from - > hdr . __level ) ;
2019-11-08 14:57:48 -08:00
to - > btree = from - > __btree ;
2019-11-08 14:53:00 -08:00
ASSERT ( to - > magic = = XFS_DA_NODE_MAGIC ) ;
}
}
2019-11-08 14:57:48 -08:00
void
xfs_da3_node_hdr_to_disk (
struct xfs_mount * mp ,
struct xfs_da_intnode * to ,
struct xfs_da3_icnode_hdr * from )
{
2021-08-18 18:46:55 -07:00
if ( xfs_has_crc ( mp ) ) {
2019-11-08 14:57:48 -08:00
struct xfs_da3_intnode * to3 = ( struct xfs_da3_intnode * ) to ;
ASSERT ( from - > magic = = XFS_DA3_NODE_MAGIC ) ;
to3 - > hdr . info . hdr . forw = cpu_to_be32 ( from - > forw ) ;
to3 - > hdr . info . hdr . back = cpu_to_be32 ( from - > back ) ;
to3 - > hdr . info . hdr . magic = cpu_to_be16 ( from - > magic ) ;
to3 - > hdr . __count = cpu_to_be16 ( from - > count ) ;
to3 - > hdr . __level = cpu_to_be16 ( from - > level ) ;
} else {
ASSERT ( from - > magic = = XFS_DA_NODE_MAGIC ) ;
to - > hdr . info . forw = cpu_to_be32 ( from - > forw ) ;
to - > hdr . info . back = cpu_to_be32 ( from - > back ) ;
to - > hdr . info . magic = cpu_to_be16 ( from - > magic ) ;
to - > hdr . __count = cpu_to_be16 ( from - > count ) ;
to - > hdr . __level = cpu_to_be16 ( from - > level ) ;
}
}
2019-02-07 10:45:48 -08:00
/*
* Verify an xfs_da3_blkinfo structure . Note that the da3 fields are only
* accessible on v5 filesystems . This header format is common across da node ,
* attr leaf and dir leaf blocks .
*/
xfs_failaddr_t
xfs_da3_blkinfo_verify (
struct xfs_buf * bp ,
struct xfs_da3_blkinfo * hdr3 )
{
2019-06-28 19:27:29 -07:00
struct xfs_mount * mp = bp - > b_mount ;
2019-02-07 10:45:48 -08:00
struct xfs_da_blkinfo * hdr = & hdr3 - > hdr ;
2019-02-16 11:47:28 -08:00
if ( ! xfs_verify_magic16 ( bp , hdr - > magic ) )
2019-02-07 10:45:48 -08:00
return __this_address ;
2021-08-18 18:46:55 -07:00
if ( xfs_has_crc ( mp ) ) {
2019-02-07 10:45:48 -08:00
if ( ! uuid_equal ( & hdr3 - > uuid , & mp - > m_sb . sb_meta_uuid ) )
return __this_address ;
2021-08-18 18:47:05 -07:00
if ( be64_to_cpu ( hdr3 - > blkno ) ! = xfs_buf_daddr ( bp ) )
2019-02-07 10:45:48 -08:00
return __this_address ;
if ( ! xfs_log_check_lsn ( mp , be64_to_cpu ( hdr3 - > lsn ) ) )
return __this_address ;
}
2019-02-16 11:47:28 -08:00
return NULL ;
2019-02-07 10:45:48 -08:00
}
2018-01-08 10:51:03 -08:00
static xfs_failaddr_t
2013-04-24 18:58:02 +10:00
xfs_da3_node_verify (
2012-11-12 22:54:17 +11:00
struct xfs_buf * bp )
{
2019-06-28 19:27:29 -07:00
struct xfs_mount * mp = bp - > b_mount ;
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * hdr = bp - > b_addr ;
struct xfs_da3_icnode_hdr ichdr ;
2019-02-07 10:45:48 -08:00
xfs_failaddr_t fa ;
2013-10-29 22:11:52 +11:00
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( mp , & ichdr , hdr ) ;
2013-04-24 18:58:02 +10:00
2019-02-07 10:45:48 -08:00
fa = xfs_da3_blkinfo_verify ( bp , bp - > b_addr ) ;
if ( fa )
return fa ;
2019-02-07 10:45:48 -08:00
2013-04-24 18:58:02 +10:00
if ( ichdr . level = = 0 )
2018-01-08 10:51:03 -08:00
return __this_address ;
2013-04-24 18:58:02 +10:00
if ( ichdr . level > XFS_DA_NODE_MAXDEPTH )
2018-01-08 10:51:03 -08:00
return __this_address ;
2013-04-24 18:58:02 +10:00
if ( ichdr . count = = 0 )
2018-01-08 10:51:03 -08:00
return __this_address ;
2013-04-24 18:58:02 +10:00
/*
* we don ' t know if the node is for and attribute or directory tree ,
* so only fail if the count is outside both bounds
*/
2014-06-06 15:20:02 +10:00
if ( ichdr . count > mp - > m_dir_geo - > node_ents & &
ichdr . count > mp - > m_attr_geo - > node_ents )
2018-01-08 10:51:03 -08:00
return __this_address ;
2013-04-24 18:58:02 +10:00
/* XXX: hash order check? */
2012-11-12 22:54:17 +11:00
2018-01-08 10:51:03 -08:00
return NULL ;
2012-11-12 22:54:17 +11:00
}
static void
2013-04-24 18:58:02 +10:00
xfs_da3_node_write_verify (
2012-11-14 17:52:32 +11:00
struct xfs_buf * bp )
{
2019-06-28 19:27:29 -07:00
struct xfs_mount * mp = bp - > b_mount ;
2018-01-24 13:38:48 -08:00
struct xfs_buf_log_item * bip = bp - > b_log_item ;
2013-04-24 18:58:02 +10:00
struct xfs_da3_node_hdr * hdr3 = bp - > b_addr ;
2018-01-08 10:51:03 -08:00
xfs_failaddr_t fa ;
2013-04-24 18:58:02 +10:00
2018-01-08 10:51:03 -08:00
fa = xfs_da3_node_verify ( bp ) ;
if ( fa ) {
xfs_verifier_error ( bp , - EFSCORRUPTED , fa ) ;
2013-04-24 18:58:02 +10:00
return ;
}
2021-08-18 18:46:37 -07:00
if ( ! xfs_has_crc ( mp ) )
2013-04-24 18:58:02 +10:00
return ;
if ( bip )
hdr3 - > info . lsn = cpu_to_be64 ( bip - > bli_item . li_lsn ) ;
2014-02-27 15:18:23 +11:00
xfs_buf_update_cksum ( bp , XFS_DA3_NODE_CRC_OFF ) ;
2012-11-14 17:52:32 +11:00
}
2012-11-14 17:54:40 +11:00
/*
* leaf / node format detection on trees is sketchy , so a node read can be done on
* leaf level blocks when detection identifies the tree as a node format tree
* incorrectly . In this case , we need to swap the verifier to match the correct
* format of the block being read .
*/
2012-11-14 17:52:32 +11:00
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	xfs_failaddr_t		fa;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		/* v3 node: checksum must pass before structure checks. */
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_verifier_error(bp, -EFSBADCRC,
					__this_address);
			break;
		}
		fallthrough;
	case XFS_DA_NODE_MAGIC:
		fa = xfs_da3_node_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		/* Not a node after all: swap to the attr leaf verifier. */
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		/* Not a node after all: swap to the dir leafn verifier. */
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
		break;
	}
}
2018-01-08 10:51:08 -08:00
/* Verify the structure of a da3 block. */
static xfs_failaddr_t
xfs_da3_node_verify_struct (
struct xfs_buf * bp )
{
struct xfs_da_blkinfo * info = bp - > b_addr ;
switch ( be16_to_cpu ( info - > magic ) ) {
case XFS_DA3_NODE_MAGIC :
case XFS_DA_NODE_MAGIC :
return xfs_da3_node_verify ( bp ) ;
case XFS_ATTR_LEAF_MAGIC :
case XFS_ATTR3_LEAF_MAGIC :
bp - > b_ops = & xfs_attr3_leaf_buf_ops ;
return bp - > b_ops - > verify_struct ( bp ) ;
case XFS_DIR2_LEAFN_MAGIC :
case XFS_DIR3_LEAFN_MAGIC :
bp - > b_ops = & xfs_dir3_leafn_buf_ops ;
return bp - > b_ops - > verify_struct ( bp ) ;
default :
return __this_address ;
}
}
2013-04-24 18:58:02 +10:00
/* Buffer ops for da node blocks; handles both v2 and v3 magic numbers. */
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.name = "xfs_da3_node",
	.magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
		     cpu_to_be16(XFS_DA3_NODE_MAGIC) },
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
	.verify_struct = xfs_da3_node_verify_struct,
};
2019-11-20 09:46:04 -08:00
static int
xfs_da3_node_set_type (
struct xfs_trans * tp ,
struct xfs_buf * bp )
{
struct xfs_da_blkinfo * info = bp - > b_addr ;
switch ( be16_to_cpu ( info - > magic ) ) {
case XFS_DA_NODE_MAGIC :
case XFS_DA3_NODE_MAGIC :
xfs_trans_buf_set_type ( tp , bp , XFS_BLFT_DA_NODE_BUF ) ;
return 0 ;
case XFS_ATTR_LEAF_MAGIC :
case XFS_ATTR3_LEAF_MAGIC :
xfs_trans_buf_set_type ( tp , bp , XFS_BLFT_ATTR_LEAF_BUF ) ;
return 0 ;
case XFS_DIR2_LEAFN_MAGIC :
case XFS_DIR3_LEAFN_MAGIC :
xfs_trans_buf_set_type ( tp , bp , XFS_BLFT_DIR_LEAFN_BUF ) ;
return 0 ;
default :
XFS_CORRUPTION_ERROR ( __func__ , XFS_ERRLEVEL_LOW , tp - > t_mountp ,
info , sizeof ( * info ) ) ;
xfs_trans_brelse ( tp , bp ) ;
return - EFSCORRUPTED ;
}
}
2012-11-12 22:54:17 +11:00
int
2013-04-24 18:58:02 +10:00
xfs_da3_node_read (
2012-11-12 22:54:17 +11:00
struct xfs_trans * tp ,
struct xfs_inode * dp ,
xfs_dablk_t bno ,
struct xfs_buf * * bpp ,
2019-11-20 09:46:04 -08:00
int whichfork )
2012-11-12 22:54:17 +11:00
{
2019-11-20 09:46:04 -08:00
int error ;
2013-04-03 16:11:29 +11:00
2019-11-20 09:46:04 -08:00
error = xfs_da_read_buf ( tp , dp , bno , 0 , bpp , whichfork ,
2019-11-20 09:46:04 -08:00
& xfs_da3_node_buf_ops ) ;
if ( error | | ! * bpp | | ! tp )
return error ;
return xfs_da3_node_set_type ( tp , * bpp ) ;
}
2013-04-03 16:11:29 +11:00
2019-11-20 09:46:04 -08:00
/* Read a da btree block by raw disk address rather than dablk. */
int
xfs_da3_node_read_mapped(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			error;

	/* Fork geometry determines how many fsblocks one dabuf covers. */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
			XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
			bpp, &xfs_da3_node_buf_ops);
	if (error || !*bpp)
		return error;

	xfs_buf_set_ref(*bpp, whichfork == XFS_ATTR_FORK ?
			XFS_ATTR_BTREE_REF : XFS_DIR_BTREE_REF);

	if (!tp)
		return 0;
	return xfs_da3_node_set_type(tp, *bpp);
}
2005-04-16 15:20:36 -07:00
/*========================================================================
* Routines used for growing the Btree .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
* Create the initial contents of an intermediate node .
*/
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	/* Get (and pin to the transaction) the buffer for the new node. */
	error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_has_crc(mp)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		/*
		 * v5: zero the whole header, then fill in the block's
		 * self-describing identity (blkno, owner, uuid).
		 */
		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	/* Write the header to disk format and log just that range. */
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));

	*bpp = bp;
	return 0;
}
/*
* Split a leaf node , rebalance , then possibly split
* intermediate nodes , rebalance , etc .
*/
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again. The new
			 * extrablk will be consumed by xfs_da3_node_split if
			 * the node is split.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * xfs_da3_node_split() should have consumed any extra blocks we added
	 * during a double leaf split in the attr fork. This is guaranteed as
	 * we can't be here if the attr fork only has a single leaf block.
	 */
	ASSERT(state->extravalid == 0 ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		goto out;

	/*
	 * Update pointers to the node which used to be block 0 and just got
	 * bumped because of the addition of a new root node. Note that the
	 * original block 0 could be at any position in the list of blocks in
	 * the tree.
	 *
	 * Note: the magic numbers and sibling pointers are in the same physical
	 * place for both v2 and v3 headers (by design). Hence it doesn't matter
	 * which version of the xfs_da_intnode structure we use here as the
	 * result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		/* The relocated old root's forward sibling must be addblk. */
		if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		/* Likewise the backward sibling link. */
		if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
			xfs_buf_mark_corrupt(oldblk->bp);
			error = -EFSCORRUPTED;
			goto out;
		}
		node = addblk->bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, addblk->bp,
				  XFS_DA_LOGRANGE(node, &node->hdr.info,
				  sizeof(node->hdr.info)));
	}
out:
	addblk->bp = NULL;
	return error;
}
/*
* Split the root . We have to create a new root and point to the two
* parts ( the split old root ) that we just created . Copy block zero to
* the EOF , extending the inode in process .
*/
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		/* Old root is an intermediate node: size up to its last entry. */
		xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
		btree = icnodehdr.btree;
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;

		/* Old root is a dir leaf: size up to its last leaf entry. */
		leaf = (xfs_dir2_leaf_t *)oldroot;
		xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&leafhdr.ents[leafhdr.count] -
			(char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		/* Fix up the self-describing blkno for the new location. */
		node3->hdr.info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);

	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
	btree = nodehdr.btree;
	/* New root points at the two halves of the split. */
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
/*
* Split the node , rebalance , then add the new entry .
*/
STATIC int /* error */
2013-04-24 18:58:02 +10:00
xfs_da3_node_split (
struct xfs_da_state * state ,
struct xfs_da_state_blk * oldblk ,
struct xfs_da_state_blk * newblk ,
struct xfs_da_state_blk * addblk ,
int treelevel ,
int * result )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * node ;
struct xfs_da3_icnode_hdr nodehdr ;
xfs_dablk_t blkno ;
int newcount ;
int error ;
int useextra ;
2013-10-29 22:11:52 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_node_split ( state - > args ) ;
2012-06-22 18:50:14 +10:00
node = oldblk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2005-04-16 15:20:36 -07:00
/*
2006-06-20 13:04:51 +10:00
* With V2 dirs the extra block is data or freespace .
2005-04-16 15:20:36 -07:00
*/
2006-06-20 13:04:51 +10:00
useextra = state - > extravalid & & state - > args - > whichfork = = XFS_ATTR_FORK ;
2005-04-16 15:20:36 -07:00
newcount = 1 + useextra ;
/*
* Do we have to split the node ?
*/
2014-06-06 15:22:04 +10:00
if ( nodehdr . count + newcount > state - > args - > geo - > node_ents ) {
2005-04-16 15:20:36 -07:00
/*
* Allocate a new node , add to the doubly linked chain of
* nodes , then move some of our excess entries into it .
*/
error = xfs_da_grow_inode ( state - > args , & blkno ) ;
if ( error )
2014-06-22 15:03:54 +10:00
return error ; /* GROT: dir is inconsistent */
2005-04-16 15:20:36 -07:00
2013-04-24 18:58:02 +10:00
error = xfs_da3_node_create ( state - > args , blkno , treelevel ,
2005-04-16 15:20:36 -07:00
& newblk - > bp , state - > args - > whichfork ) ;
if ( error )
2014-06-22 15:03:54 +10:00
return error ; /* GROT: dir is inconsistent */
2005-04-16 15:20:36 -07:00
newblk - > blkno = blkno ;
newblk - > magic = XFS_DA_NODE_MAGIC ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_rebalance ( state , oldblk , newblk ) ;
error = xfs_da3_blk_link ( state , oldblk , newblk ) ;
2005-04-16 15:20:36 -07:00
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
* result = 1 ;
} else {
* result = 0 ;
}
/*
* Insert the new entry ( s ) into the correct block
* ( updating last hashval in the process ) .
*
2013-04-24 18:58:02 +10:00
* xfs_da3_node_add ( ) inserts BEFORE the given index ,
2005-04-16 15:20:36 -07:00
* and as a result of using node_lookup_int ( ) we always
* point to a valid entry ( not after one ) , but a split
* operation always results in a new block whose hashvals
* FOLLOW the current block .
*
* If we had double - split op below us , then add the extra block too .
*/
2012-06-22 18:50:14 +10:00
node = oldblk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2013-04-24 18:58:02 +10:00
if ( oldblk - > index < = nodehdr . count ) {
2005-04-16 15:20:36 -07:00
oldblk - > index + + ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_add ( state , oldblk , addblk ) ;
2005-04-16 15:20:36 -07:00
if ( useextra ) {
if ( state - > extraafter )
oldblk - > index + + ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_add ( state , oldblk , & state - > extrablk ) ;
2005-04-16 15:20:36 -07:00
state - > extravalid = 0 ;
}
} else {
newblk - > index + + ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_add ( state , newblk , addblk ) ;
2005-04-16 15:20:36 -07:00
if ( useextra ) {
if ( state - > extraafter )
newblk - > index + + ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_add ( state , newblk , & state - > extrablk ) ;
2005-04-16 15:20:36 -07:00
state - > extravalid = 0 ;
}
}
2014-06-22 15:03:54 +10:00
return 0 ;
2005-04-16 15:20:36 -07:00
}
/*
* Balance the btree elements between two intermediate nodes ,
* usually one full and one empty .
*
* NOTE : if blk2 is empty , then it will get the upper half of blk1 .
*/
STATIC void
2013-04-24 18:58:02 +10:00
xfs_da3_node_rebalance (
struct xfs_da_state * state ,
struct xfs_da_state_blk * blk1 ,
struct xfs_da_state_blk * blk2 )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * node1 ;
struct xfs_da_intnode * node2 ;
struct xfs_da_intnode * tmpnode ;
struct xfs_da_node_entry * btree1 ;
struct xfs_da_node_entry * btree2 ;
struct xfs_da_node_entry * btree_s ;
struct xfs_da_node_entry * btree_d ;
struct xfs_da3_icnode_hdr nodehdr1 ;
struct xfs_da3_icnode_hdr nodehdr2 ;
struct xfs_trans * tp ;
int count ;
int tmp ;
int swap = 0 ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_node_rebalance ( state - > args ) ;
2012-06-22 18:50:14 +10:00
node1 = blk1 - > bp - > b_addr ;
node2 = blk2 - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr1 , node1 ) ;
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr2 , node2 ) ;
2019-11-08 14:57:48 -08:00
btree1 = nodehdr1 . btree ;
btree2 = nodehdr2 . btree ;
2013-04-24 18:58:02 +10:00
2005-04-16 15:20:36 -07:00
/*
* Figure out how many entries need to move , and in which direction .
* Swap the nodes around if that makes it simpler .
*/
2013-04-24 18:58:02 +10:00
if ( nodehdr1 . count > 0 & & nodehdr2 . count > 0 & &
( ( be32_to_cpu ( btree2 [ 0 ] . hashval ) < be32_to_cpu ( btree1 [ 0 ] . hashval ) ) | |
( be32_to_cpu ( btree2 [ nodehdr2 . count - 1 ] . hashval ) <
be32_to_cpu ( btree1 [ nodehdr1 . count - 1 ] . hashval ) ) ) ) {
2005-04-16 15:20:36 -07:00
tmpnode = node1 ;
node1 = node2 ;
node2 = tmpnode ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr1 , node1 ) ;
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr2 , node2 ) ;
2019-11-08 14:57:48 -08:00
btree1 = nodehdr1 . btree ;
btree2 = nodehdr2 . btree ;
2013-04-24 18:58:02 +10:00
swap = 1 ;
2005-04-16 15:20:36 -07:00
}
2013-04-24 18:58:02 +10:00
count = ( nodehdr1 . count - nodehdr2 . count ) / 2 ;
2005-04-16 15:20:36 -07:00
if ( count = = 0 )
return ;
tp = state - > args - > trans ;
/*
* Two cases : high - to - low and low - to - high .
*/
if ( count > 0 ) {
/*
* Move elements in node2 up to make a hole .
*/
2013-04-24 18:58:02 +10:00
tmp = nodehdr2 . count ;
if ( tmp > 0 ) {
2005-04-16 15:20:36 -07:00
tmp * = ( uint ) sizeof ( xfs_da_node_entry_t ) ;
2013-04-24 18:58:02 +10:00
btree_s = & btree2 [ 0 ] ;
btree_d = & btree2 [ count ] ;
2005-04-16 15:20:36 -07:00
memmove ( btree_d , btree_s , tmp ) ;
}
/*
* Move the req ' d B - tree elements from high in node1 to
* low in node2 .
*/
2013-04-24 18:58:02 +10:00
nodehdr2 . count + = count ;
2005-04-16 15:20:36 -07:00
tmp = count * ( uint ) sizeof ( xfs_da_node_entry_t ) ;
2013-04-24 18:58:02 +10:00
btree_s = & btree1 [ nodehdr1 . count - count ] ;
btree_d = & btree2 [ 0 ] ;
2005-04-16 15:20:36 -07:00
memcpy ( btree_d , btree_s , tmp ) ;
2013-04-24 18:58:02 +10:00
nodehdr1 . count - = count ;
2005-04-16 15:20:36 -07:00
} else {
/*
* Move the req ' d B - tree elements from low in node2 to
* high in node1 .
*/
count = - count ;
tmp = count * ( uint ) sizeof ( xfs_da_node_entry_t ) ;
2013-04-24 18:58:02 +10:00
btree_s = & btree2 [ 0 ] ;
btree_d = & btree1 [ nodehdr1 . count ] ;
2005-04-16 15:20:36 -07:00
memcpy ( btree_d , btree_s , tmp ) ;
2013-04-24 18:58:02 +10:00
nodehdr1 . count + = count ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , blk1 - > bp ,
2005-04-16 15:20:36 -07:00
XFS_DA_LOGRANGE ( node1 , btree_d , tmp ) ) ;
/*
* Move elements in node2 down to fill the hole .
*/
2013-04-24 18:58:02 +10:00
tmp = nodehdr2 . count - count ;
2005-04-16 15:20:36 -07:00
tmp * = ( uint ) sizeof ( xfs_da_node_entry_t ) ;
2013-04-24 18:58:02 +10:00
btree_s = & btree2 [ count ] ;
btree_d = & btree2 [ 0 ] ;
2005-04-16 15:20:36 -07:00
memmove ( btree_d , btree_s , tmp ) ;
2013-04-24 18:58:02 +10:00
nodehdr2 . count - = count ;
2005-04-16 15:20:36 -07:00
}
/*
* Log header of node 1 and all current bits of node 2.
*/
2019-11-08 14:57:48 -08:00
xfs_da3_node_hdr_to_disk ( dp - > i_mount , node1 , & nodehdr1 ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , blk1 - > bp ,
2019-11-08 14:57:49 -08:00
XFS_DA_LOGRANGE ( node1 , & node1 - > hdr ,
state - > args - > geo - > node_hdr_size ) ) ;
2013-04-24 18:58:02 +10:00
2019-11-08 14:57:48 -08:00
xfs_da3_node_hdr_to_disk ( dp - > i_mount , node2 , & nodehdr2 ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , blk2 - > bp ,
2005-04-16 15:20:36 -07:00
XFS_DA_LOGRANGE ( node2 , & node2 - > hdr ,
2019-11-08 14:57:49 -08:00
state - > args - > geo - > node_hdr_size +
2013-04-24 18:58:02 +10:00
( sizeof ( btree2 [ 0 ] ) * nodehdr2 . count ) ) ) ;
2005-04-16 15:20:36 -07:00
/*
* Record the last hashval from each block for upward propagation .
* ( note : don ' t use the swapped node pointers )
*/
2013-04-24 18:58:02 +10:00
if ( swap ) {
node1 = blk1 - > bp - > b_addr ;
node2 = blk2 - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr1 , node1 ) ;
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr2 , node2 ) ;
2019-11-08 14:57:48 -08:00
btree1 = nodehdr1 . btree ;
btree2 = nodehdr2 . btree ;
2013-04-24 18:58:02 +10:00
}
blk1 - > hashval = be32_to_cpu ( btree1 [ nodehdr1 . count - 1 ] . hashval ) ;
blk2 - > hashval = be32_to_cpu ( btree2 [ nodehdr2 . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
/*
* Adjust the expected index for insertion .
*/
2013-04-24 18:58:02 +10:00
if ( blk1 - > index > = nodehdr1 . count ) {
blk2 - > index = blk1 - > index - nodehdr1 . count ;
blk1 - > index = nodehdr1 . count + 1 ; /* make it invalid */
2005-04-16 15:20:36 -07:00
}
}
/*
* Add a new entry to an intermediate node .
*/
STATIC void
2013-04-24 18:58:02 +10:00
xfs_da3_node_add (
struct xfs_da_state * state ,
struct xfs_da_state_blk * oldblk ,
struct xfs_da_state_blk * newblk )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * node ;
struct xfs_da3_icnode_hdr nodehdr ;
struct xfs_da_node_entry * btree ;
int tmp ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_node_add ( state - > args ) ;
2012-06-22 18:50:14 +10:00
node = oldblk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2019-11-08 14:57:48 -08:00
btree = nodehdr . btree ;
2013-04-24 18:58:02 +10:00
ASSERT ( oldblk - > index > = 0 & & oldblk - > index < = nodehdr . count ) ;
2005-04-16 15:20:36 -07:00
ASSERT ( newblk - > blkno ! = 0 ) ;
2006-06-20 13:04:51 +10:00
if ( state - > args - > whichfork = = XFS_DATA_FORK )
2014-06-06 15:11:18 +10:00
ASSERT ( newblk - > blkno > = state - > args - > geo - > leafblk & &
newblk - > blkno < state - > args - > geo - > freeblk ) ;
2005-04-16 15:20:36 -07:00
/*
* We may need to make some room before we insert the new node .
*/
tmp = 0 ;
2013-04-24 18:58:02 +10:00
if ( oldblk - > index < nodehdr . count ) {
tmp = ( nodehdr . count - oldblk - > index ) * ( uint ) sizeof ( * btree ) ;
memmove ( & btree [ oldblk - > index + 1 ] , & btree [ oldblk - > index ] , tmp ) ;
2005-04-16 15:20:36 -07:00
}
2013-04-24 18:58:02 +10:00
btree [ oldblk - > index ] . hashval = cpu_to_be32 ( newblk - > hashval ) ;
btree [ oldblk - > index ] . before = cpu_to_be32 ( newblk - > blkno ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , oldblk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( node , & btree [ oldblk - > index ] ,
tmp + sizeof ( * btree ) ) ) ;
nodehdr . count + = 1 ;
2019-11-08 14:57:48 -08:00
xfs_da3_node_hdr_to_disk ( dp - > i_mount , node , & nodehdr ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , oldblk - > bp ,
2019-11-08 14:57:49 -08:00
XFS_DA_LOGRANGE ( node , & node - > hdr ,
state - > args - > geo - > node_hdr_size ) ) ;
2005-04-16 15:20:36 -07:00
/*
* Copy the last hash value from the oldblk to propagate upwards .
*/
2013-04-24 18:58:02 +10:00
oldblk - > hashval = be32_to_cpu ( btree [ nodehdr . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
}
/*========================================================================
* Routines used for shrinking the Btree .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
* Deallocate an empty leaf node , remove it from its parent ,
* possibly deallocating that block , etc . . .
*/
int
2013-04-24 18:58:02 +10:00
xfs_da3_join (
struct xfs_da_state * state )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_state_blk * drop_blk ;
struct xfs_da_state_blk * save_blk ;
int action = 0 ;
int error ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_join ( state - > args ) ;
2005-04-16 15:20:36 -07:00
drop_blk = & state - > path . blk [ state - > path . active - 1 ] ;
save_blk = & state - > altpath . blk [ state - > path . active - 1 ] ;
ASSERT ( state - > path . blk [ 0 ] . magic = = XFS_DA_NODE_MAGIC ) ;
ASSERT ( drop_blk - > magic = = XFS_ATTR_LEAF_MAGIC | |
2006-06-20 13:04:51 +10:00
drop_blk - > magic = = XFS_DIR2_LEAFN_MAGIC ) ;
2005-04-16 15:20:36 -07:00
/*
* Walk back up the tree joining / deallocating as necessary .
* When we stop dropping blocks , break out .
*/
for ( ; state - > path . active > = 2 ; drop_blk - - , save_blk - - ,
state - > path . active - - ) {
/*
* See if we can combine the block with a neighbor .
* ( action = = 0 ) = > no options , just leave
* ( action = = 1 ) = > coalesce , then unlink
* ( action = = 2 ) = > block empty , unlink it
*/
switch ( drop_blk - > magic ) {
case XFS_ATTR_LEAF_MAGIC :
2013-04-24 18:58:55 +10:00
error = xfs_attr3_leaf_toosmall ( state , & action ) ;
2005-04-16 15:20:36 -07:00
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
if ( action = = 0 )
2014-06-22 15:03:54 +10:00
return 0 ;
2013-04-24 18:58:55 +10:00
xfs_attr3_leaf_unbalance ( state , drop_blk , save_blk ) ;
2005-04-16 15:20:36 -07:00
break ;
case XFS_DIR2_LEAFN_MAGIC :
error = xfs_dir2_leafn_toosmall ( state , & action ) ;
if ( error )
return error ;
if ( action = = 0 )
return 0 ;
xfs_dir2_leafn_unbalance ( state , drop_blk , save_blk ) ;
break ;
case XFS_DA_NODE_MAGIC :
/*
* Remove the offending node , fixup hashvals ,
* check for a toosmall neighbor .
*/
2013-04-24 18:58:02 +10:00
xfs_da3_node_remove ( state , drop_blk ) ;
xfs_da3_fixhashpath ( state , & state - > path ) ;
error = xfs_da3_node_toosmall ( state , & action ) ;
2005-04-16 15:20:36 -07:00
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
if ( action = = 0 )
return 0 ;
2013-04-24 18:58:02 +10:00
xfs_da3_node_unbalance ( state , drop_blk , save_blk ) ;
2005-04-16 15:20:36 -07:00
break ;
}
2013-04-24 18:58:02 +10:00
xfs_da3_fixhashpath ( state , & state - > altpath ) ;
error = xfs_da3_blk_unlink ( state , drop_blk , save_blk ) ;
2005-04-16 15:20:36 -07:00
xfs_da_state_kill_altpath ( state ) ;
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
error = xfs_da_shrink_inode ( state - > args , drop_blk - > blkno ,
drop_blk - > bp ) ;
drop_blk - > bp = NULL ;
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
}
/*
* We joined all the way to the top . If it turns out that
* we only have one entry in the root , make the child block
* the new root .
*/
2013-04-24 18:58:02 +10:00
xfs_da3_node_remove ( state , drop_blk ) ;
xfs_da3_fixhashpath ( state , & state - > path ) ;
error = xfs_da3_root_join ( state , & state - > path . blk [ 0 ] ) ;
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
}
2011-07-18 18:14:09 +00:00
#ifdef DEBUG
/*
 * Sanity-check the sole remaining child block before it is promoted to
 * the root: its magic must match the expected level (leaf at level 1,
 * intermediate node above that), and it must have no siblings.
 */
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
2005-04-16 15:20:36 -07:00
/*
* We have only one entry in the root . Copy the only remaining child of
* the old root to block 0 as the new root node .
*/
STATIC int
2013-04-24 18:58:02 +10:00
xfs_da3_root_join (
struct xfs_da_state * state ,
struct xfs_da_state_blk * root_blk )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * oldroot ;
struct xfs_da_args * args ;
xfs_dablk_t child ;
struct xfs_buf * bp ;
struct xfs_da3_icnode_hdr oldroothdr ;
int error ;
2013-10-29 22:11:52 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_root_join ( state - > args ) ;
2005-04-16 15:20:36 -07:00
ASSERT ( root_blk - > magic = = XFS_DA_NODE_MAGIC ) ;
2013-04-24 18:58:02 +10:00
args = state - > args ;
2012-06-22 18:50:14 +10:00
oldroot = root_blk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & oldroothdr , oldroot ) ;
2013-04-24 18:58:02 +10:00
ASSERT ( oldroothdr . forw = = 0 ) ;
ASSERT ( oldroothdr . back = = 0 ) ;
2005-04-16 15:20:36 -07:00
/*
* If the root has more than one child , then don ' t do anything .
*/
2013-04-24 18:58:02 +10:00
if ( oldroothdr . count > 1 )
return 0 ;
2005-04-16 15:20:36 -07:00
/*
* Read in the ( only ) child block , then copy those bytes into
* the root block ' s buffer and free the original child block .
*/
2019-11-08 14:57:48 -08:00
child = be32_to_cpu ( oldroothdr . btree [ 0 ] . before ) ;
2005-04-16 15:20:36 -07:00
ASSERT ( child ! = 0 ) ;
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( args - > trans , dp , child , & bp , args - > whichfork ) ;
2005-04-16 15:20:36 -07:00
if ( error )
2013-04-24 18:58:02 +10:00
return error ;
xfs_da_blkinfo_onlychild_validate ( bp - > b_addr , oldroothdr . level ) ;
2011-07-18 18:14:09 +00:00
2012-11-14 17:52:32 +11:00
/*
* This could be copying a leaf back into the root block in the case of
* there only being a single leaf block left in the tree . Hence we have
2012-11-14 17:54:40 +11:00
* to update the b_ops pointer as well to match the buffer type change
2013-04-24 18:58:02 +10:00
* that could occur . For dir3 blocks we also need to update the block
* number in the buffer header .
2012-11-14 17:52:32 +11:00
*/
2014-06-06 15:22:04 +10:00
memcpy ( root_blk - > bp - > b_addr , bp - > b_addr , args - > geo - > blksize ) ;
2012-11-14 17:54:40 +11:00
root_blk - > bp - > b_ops = bp - > b_ops ;
2013-04-03 16:11:29 +11:00
xfs_trans_buf_copy_type ( root_blk - > bp , bp ) ;
2013-04-24 18:58:02 +10:00
if ( oldroothdr . magic = = XFS_DA3_NODE_MAGIC ) {
struct xfs_da3_blkinfo * da3 = root_blk - > bp - > b_addr ;
2021-08-18 18:47:05 -07:00
da3 - > blkno = cpu_to_be64 ( xfs_buf_daddr ( root_blk - > bp ) ) ;
2013-04-24 18:58:02 +10:00
}
2014-06-06 15:22:04 +10:00
xfs_trans_log_buf ( args - > trans , root_blk - > bp , 0 ,
args - > geo - > blksize - 1 ) ;
2005-04-16 15:20:36 -07:00
error = xfs_da_shrink_inode ( args , child , bp ) ;
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
}
/*
* Check a node block and its neighbors to see if the block should be
* collapsed into one or the other neighbor . Always keep the block
* with the smaller block number .
* If the current block is over 50 % full , don ' t try to join it , return 0.
* If the block is empty , fill in the state structure and return 2.
* If it can be collapsed , fill in the state structure and return 1.
* If nothing can be done , return 0.
*/
STATIC int
2013-04-24 18:58:02 +10:00
xfs_da3_node_toosmall (
struct xfs_da_state * state ,
int * action )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * node ;
struct xfs_da_state_blk * blk ;
struct xfs_da_blkinfo * info ;
xfs_dablk_t blkno ;
struct xfs_buf * bp ;
struct xfs_da3_icnode_hdr nodehdr ;
int count ;
int forward ;
int error ;
int retval ;
int i ;
2013-10-29 22:11:52 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-11-12 22:53:53 +11:00
trace_xfs_da_node_toosmall ( state - > args ) ;
2005-04-16 15:20:36 -07:00
/*
* Check for the degenerate case of the block being over 50 % full .
* If so , it ' s not worth even looking to see if we might be able
* to coalesce with a sibling .
*/
blk = & state - > path . blk [ state - > path . active - 1 ] ;
2012-06-22 18:50:14 +10:00
info = blk - > bp - > b_addr ;
2005-04-16 15:20:36 -07:00
node = ( xfs_da_intnode_t * ) info ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2014-06-06 15:22:04 +10:00
if ( nodehdr . count > ( state - > args - > geo - > node_ents > > 1 ) ) {
2005-04-16 15:20:36 -07:00
* action = 0 ; /* blk over 50%, don't try to join */
2014-06-22 15:03:54 +10:00
return 0 ; /* blk over 50%, don't try to join */
2005-04-16 15:20:36 -07:00
}
/*
* Check for the degenerate case of the block being empty .
* If the block is empty , we ' ll simply delete it , no need to
2006-03-29 08:55:14 +10:00
* coalesce it with a sibling block . We choose ( arbitrarily )
2005-04-16 15:20:36 -07:00
* to merge with the forward block unless it is NULL .
*/
2013-04-24 18:58:02 +10:00
if ( nodehdr . count = = 0 ) {
2005-04-16 15:20:36 -07:00
/*
* Make altpath point to the block we want to keep and
* path point to the block we want to drop ( this one ) .
*/
2006-03-17 17:28:40 +11:00
forward = ( info - > forw ! = 0 ) ;
2005-04-16 15:20:36 -07:00
memcpy ( & state - > altpath , & state - > path , sizeof ( state - > path ) ) ;
2013-04-24 18:58:02 +10:00
error = xfs_da3_path_shift ( state , & state - > altpath , forward ,
2005-04-16 15:20:36 -07:00
0 , & retval ) ;
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
if ( retval ) {
* action = 0 ;
} else {
* action = 2 ;
}
2014-06-22 15:03:54 +10:00
return 0 ;
2005-04-16 15:20:36 -07:00
}
/*
* Examine each sibling block to see if we can coalesce with
* at least 25 % free space to spare . We need to figure out
* whether to merge with the forward or the backward block .
* We prefer coalescing with the lower numbered sibling so as
* to shrink a directory over time .
*/
2014-06-06 15:22:04 +10:00
count = state - > args - > geo - > node_ents ;
count - = state - > args - > geo - > node_ents > > 2 ;
2013-04-24 18:58:02 +10:00
count - = nodehdr . count ;
2005-04-16 15:20:36 -07:00
/* start with smaller blk num */
2013-04-24 18:58:02 +10:00
forward = nodehdr . forw < nodehdr . back ;
2005-04-16 15:20:36 -07:00
for ( i = 0 ; i < 2 ; forward = ! forward , i + + ) {
2013-09-23 12:18:58 -05:00
struct xfs_da3_icnode_hdr thdr ;
2005-04-16 15:20:36 -07:00
if ( forward )
2013-04-24 18:58:02 +10:00
blkno = nodehdr . forw ;
2005-04-16 15:20:36 -07:00
else
2013-04-24 18:58:02 +10:00
blkno = nodehdr . back ;
2005-04-16 15:20:36 -07:00
if ( blkno = = 0 )
continue ;
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( state - > args - > trans , dp , blkno , & bp ,
state - > args - > whichfork ) ;
2005-04-16 15:20:36 -07:00
if ( error )
2014-06-22 15:03:54 +10:00
return error ;
2005-04-16 15:20:36 -07:00
2012-06-22 18:50:14 +10:00
node = bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & thdr , node ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_brelse ( state - > args - > trans , bp ) ;
2013-04-24 18:58:02 +10:00
2013-09-23 12:18:58 -05:00
if ( count - thdr . count > = 0 )
2005-04-16 15:20:36 -07:00
break ; /* fits with at least 25% to spare */
}
if ( i > = 2 ) {
* action = 0 ;
2013-04-24 18:58:02 +10:00
return 0 ;
2005-04-16 15:20:36 -07:00
}
/*
* Make altpath point to the block we want to keep ( the lower
* numbered block ) and path point to the block we want to drop .
*/
memcpy ( & state - > altpath , & state - > path , sizeof ( state - > path ) ) ;
if ( blkno < blk - > blkno ) {
2013-04-24 18:58:02 +10:00
error = xfs_da3_path_shift ( state , & state - > altpath , forward ,
2005-04-16 15:20:36 -07:00
0 , & retval ) ;
} else {
2013-04-24 18:58:02 +10:00
error = xfs_da3_path_shift ( state , & state - > path , forward ,
2005-04-16 15:20:36 -07:00
0 , & retval ) ;
2013-04-24 18:58:02 +10:00
}
if ( error )
return error ;
if ( retval ) {
* action = 0 ;
return 0 ;
2005-04-16 15:20:36 -07:00
}
* action = 1 ;
2013-04-24 18:58:02 +10:00
return 0 ;
}
/*
 * Pick up the last hashvalue from an intermediate node.
 *
 * If @count is non-NULL it receives the node's entry count.  Returns 0
 * for an empty node, otherwise the hashval of the node's last entry.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da3_icnode_hdr nodehdr;

	xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
}
/*
* Walk back up the tree adjusting hash values as necessary ,
* when we stop making changes , return .
*/
void
2013-04-24 18:58:02 +10:00
xfs_da3_fixhashpath (
struct xfs_da_state * state ,
struct xfs_da_state_path * path )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_state_blk * blk ;
struct xfs_da_intnode * node ;
struct xfs_da_node_entry * btree ;
xfs_dahash_t lasthash = 0 ;
int level ;
int count ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-11-12 22:53:53 +11:00
trace_xfs_da_fixhashpath ( state - > args ) ;
2005-04-16 15:20:36 -07:00
level = path - > active - 1 ;
blk = & path - > blk [ level ] ;
switch ( blk - > magic ) {
case XFS_ATTR_LEAF_MAGIC :
lasthash = xfs_attr_leaf_lasthash ( blk - > bp , & count ) ;
if ( count = = 0 )
return ;
break ;
case XFS_DIR2_LEAFN_MAGIC :
2017-06-16 11:00:13 -07:00
lasthash = xfs_dir2_leaf_lasthash ( dp , blk - > bp , & count ) ;
2005-04-16 15:20:36 -07:00
if ( count = = 0 )
return ;
break ;
case XFS_DA_NODE_MAGIC :
2013-10-29 22:11:51 +11:00
lasthash = xfs_da3_node_lasthash ( dp , blk - > bp , & count ) ;
2005-04-16 15:20:36 -07:00
if ( count = = 0 )
return ;
break ;
}
for ( blk - - , level - - ; level > = 0 ; blk - - , level - - ) {
2013-04-24 18:58:02 +10:00
struct xfs_da3_icnode_hdr nodehdr ;
2012-06-22 18:50:14 +10:00
node = blk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2019-11-08 14:57:48 -08:00
btree = nodehdr . btree ;
2014-04-04 07:10:49 +11:00
if ( be32_to_cpu ( btree [ blk - > index ] . hashval ) = = lasthash )
2005-04-16 15:20:36 -07:00
break ;
blk - > hashval = lasthash ;
2013-04-24 18:58:02 +10:00
btree [ blk - > index ] . hashval = cpu_to_be32 ( lasthash ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , blk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( node , & btree [ blk - > index ] ,
sizeof ( * btree ) ) ) ;
2005-04-16 15:20:36 -07:00
2013-04-24 18:58:02 +10:00
lasthash = be32_to_cpu ( btree [ nodehdr . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
}
}
/*
* Remove an entry from an intermediate node .
*/
STATIC void
2013-04-24 18:58:02 +10:00
xfs_da3_node_remove (
struct xfs_da_state * state ,
struct xfs_da_state_blk * drop_blk )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * node ;
struct xfs_da3_icnode_hdr nodehdr ;
struct xfs_da_node_entry * btree ;
int index ;
int tmp ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_node_remove ( state - > args ) ;
2012-06-22 18:50:14 +10:00
node = drop_blk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & nodehdr , node ) ;
2013-04-24 18:58:02 +10:00
ASSERT ( drop_blk - > index < nodehdr . count ) ;
2005-04-16 15:20:36 -07:00
ASSERT ( drop_blk - > index > = 0 ) ;
/*
* Copy over the offending entry , or just zero it out .
*/
2013-04-24 18:58:02 +10:00
index = drop_blk - > index ;
2019-11-08 14:57:48 -08:00
btree = nodehdr . btree ;
2013-04-24 18:58:02 +10:00
if ( index < nodehdr . count - 1 ) {
tmp = nodehdr . count - index - 1 ;
2005-04-16 15:20:36 -07:00
tmp * = ( uint ) sizeof ( xfs_da_node_entry_t ) ;
2013-04-24 18:58:02 +10:00
memmove ( & btree [ index ] , & btree [ index + 1 ] , tmp ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , drop_blk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( node , & btree [ index ] , tmp ) ) ;
index = nodehdr . count - 1 ;
2005-04-16 15:20:36 -07:00
}
2013-04-24 18:58:02 +10:00
memset ( & btree [ index ] , 0 , sizeof ( xfs_da_node_entry_t ) ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , drop_blk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( node , & btree [ index ] , sizeof ( btree [ index ] ) ) ) ;
nodehdr . count - = 1 ;
2019-11-08 14:57:48 -08:00
xfs_da3_node_hdr_to_disk ( dp - > i_mount , node , & nodehdr ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( state - > args - > trans , drop_blk - > bp ,
2019-11-08 14:57:49 -08:00
XFS_DA_LOGRANGE ( node , & node - > hdr , state - > args - > geo - > node_hdr_size ) ) ;
2005-04-16 15:20:36 -07:00
/*
* Copy the last hash value from the block to propagate upwards .
*/
2013-04-24 18:58:02 +10:00
drop_blk - > hashval = be32_to_cpu ( btree [ index - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
}
/*
2013-04-24 18:58:02 +10:00
* Unbalance the elements between two intermediate nodes ,
2005-04-16 15:20:36 -07:00
* move all Btree elements from one node into another .
*/
STATIC void
2013-04-24 18:58:02 +10:00
xfs_da3_node_unbalance (
struct xfs_da_state * state ,
struct xfs_da_state_blk * drop_blk ,
struct xfs_da_state_blk * save_blk )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_intnode * drop_node ;
struct xfs_da_intnode * save_node ;
struct xfs_da_node_entry * drop_btree ;
struct xfs_da_node_entry * save_btree ;
struct xfs_da3_icnode_hdr drop_hdr ;
struct xfs_da3_icnode_hdr save_hdr ;
struct xfs_trans * tp ;
int sindex ;
int tmp ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp = state - > args - > dp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_node_unbalance ( state - > args ) ;
2012-06-22 18:50:14 +10:00
drop_node = drop_blk - > bp - > b_addr ;
save_node = save_blk - > bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & drop_hdr , drop_node ) ;
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & save_hdr , save_node ) ;
2019-11-08 14:57:48 -08:00
drop_btree = drop_hdr . btree ;
save_btree = save_hdr . btree ;
2005-04-16 15:20:36 -07:00
tp = state - > args - > trans ;
/*
* If the dying block has lower hashvals , then move all the
* elements in the remaining block up to make a hole .
*/
2013-04-24 18:58:02 +10:00
if ( ( be32_to_cpu ( drop_btree [ 0 ] . hashval ) <
be32_to_cpu ( save_btree [ 0 ] . hashval ) ) | |
( be32_to_cpu ( drop_btree [ drop_hdr . count - 1 ] . hashval ) <
be32_to_cpu ( save_btree [ save_hdr . count - 1 ] . hashval ) ) ) {
/* XXX: check this - is memmove dst correct? */
tmp = save_hdr . count * sizeof ( xfs_da_node_entry_t ) ;
memmove ( & save_btree [ drop_hdr . count ] , & save_btree [ 0 ] , tmp ) ;
sindex = 0 ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , save_blk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( save_node , & save_btree [ 0 ] ,
( save_hdr . count + drop_hdr . count ) *
sizeof ( xfs_da_node_entry_t ) ) ) ;
2005-04-16 15:20:36 -07:00
} else {
2013-04-24 18:58:02 +10:00
sindex = save_hdr . count ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , save_blk - > bp ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( save_node , & save_btree [ sindex ] ,
drop_hdr . count * sizeof ( xfs_da_node_entry_t ) ) ) ;
2005-04-16 15:20:36 -07:00
}
/*
* Move all the B - tree elements from drop_blk to save_blk .
*/
2013-04-24 18:58:02 +10:00
tmp = drop_hdr . count * ( uint ) sizeof ( xfs_da_node_entry_t ) ;
memcpy ( & save_btree [ sindex ] , & drop_btree [ 0 ] , tmp ) ;
save_hdr . count + = drop_hdr . count ;
2005-04-16 15:20:36 -07:00
2019-11-08 14:57:48 -08:00
xfs_da3_node_hdr_to_disk ( dp - > i_mount , save_node , & save_hdr ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , save_blk - > bp ,
2005-04-16 15:20:36 -07:00
XFS_DA_LOGRANGE ( save_node , & save_node - > hdr ,
2019-11-08 14:57:49 -08:00
state - > args - > geo - > node_hdr_size ) ) ;
2005-04-16 15:20:36 -07:00
/*
* Save the last hashval in the remaining block for upward propagation .
*/
2013-04-24 18:58:02 +10:00
save_blk - > hashval = be32_to_cpu ( save_btree [ save_hdr . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
}
/*========================================================================
* Routines used for finding things in the Btree .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 *
 * On success returns 0 and stores the lookup result of the leaf
 * operation (e.g. -ENOENT/-ENOATTR for a miss) in *result; returns a
 * negative errno for read errors or on-disk corruption.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	unsigned int		expected_level = 0;
	uint16_t		magic;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = args->geo->leafblk;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					&blk->bp, args->whichfork);
		if (error) {
			/* back the failed slot out of the active path */
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		magic = be16_to_cpu(curr->magic);

		/* Attribute leaf: record it and stop descending. */
		if (magic == XFS_ATTR_LEAF_MAGIC ||
		    magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		/* Directory leaf: record it and stop descending. */
		if (magic == XFS_DIR2_LEAFN_MAGIC ||
		    magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		}

		/* Anything other than a node block here is corruption. */
		if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
		btree = nodehdr.btree;

		/* Tree taller than we can handle; bail out! */
		if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		}

		/*
		 * Check the level from the root.  expected_level counts down
		 * as we descend; every child must be exactly one level below
		 * its parent or the tree is corrupt.
		 */
		if (blkno == args->geo->leafblk)
			expected_level = nodehdr.level - 1;
		else if (expected_level != nodehdr.level) {
			xfs_buf_mark_corrupt(blk->bp);
			return -EFSCORRUPTED;
		} else
			expected_level--;

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			/* hashval beyond all entries: follow the last child */
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}

		/* We can't point back to the root. */
		if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
			return -EFSCORRUPTED;
	}

	/* We must have walked all the way down to a leaf (level 0). */
	if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
		return -EFSCORRUPTED;

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return error;
			if (retval == 0) {
				/* shifted to the next leaf; search it too */
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
/*========================================================================
* Utility routines .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
2013-04-24 18:58:02 +10:00
/*
* Compare two intermediate nodes for " order " .
*/
STATIC int
xfs_da3_node_order (
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp ,
2013-04-24 18:58:02 +10:00
struct xfs_buf * node1_bp ,
struct xfs_buf * node2_bp )
{
struct xfs_da_intnode * node1 ;
struct xfs_da_intnode * node2 ;
struct xfs_da_node_entry * btree1 ;
struct xfs_da_node_entry * btree2 ;
struct xfs_da3_icnode_hdr node1hdr ;
struct xfs_da3_icnode_hdr node2hdr ;
node1 = node1_bp - > b_addr ;
node2 = node2_bp - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & node1hdr , node1 ) ;
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & node2hdr , node2 ) ;
2019-11-08 14:57:48 -08:00
btree1 = node1hdr . btree ;
btree2 = node2hdr . btree ;
2013-04-24 18:58:02 +10:00
if ( node1hdr . count > 0 & & node2hdr . count > 0 & &
( ( be32_to_cpu ( btree2 [ 0 ] . hashval ) < be32_to_cpu ( btree1 [ 0 ] . hashval ) ) | |
( be32_to_cpu ( btree2 [ node2hdr . count - 1 ] . hashval ) <
be32_to_cpu ( btree1 [ node1hdr . count - 1 ] . hashval ) ) ) ) {
return 1 ;
}
return 0 ;
}
2005-04-16 15:20:36 -07:00
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 *
 * Decides whether new_blk sorts before or after old_blk by hashval and
 * splices it into the forw/back sibling chain, logging every buffer whose
 * on-disk pointers are modified.
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;	/* nonzero: new sorts before old */
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	/* Use the type-specific comparison to pick the insertion side. */
	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			/* fix the forw pointer of the old left sibling */
			error = xfs_da3_node_read(args->trans, dp,
						  be32_to_cpu(old_info->back),
						  &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			/* fix the back pointer of the old right sibling */
			error = xfs_da3_node_read(args->trans, dp,
						  be32_to_cpu(old_info->forw),
						  &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info) - 1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	/* log the headers of both modified blocks */
	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
/*
 * Unlink a block from a doubly linked list of blocks.
 *
 * drop_blk is being removed; save_blk is its surviving sibling whose
 * forw/back pointer (and that of the block beyond drop_blk, if any) is
 * rewritten to splice drop_blk out of the chain.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	/* the two blocks must already be adjacent siblings */
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		/* drop_blk sits to the left of save_blk */
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			/* fix the forw pointer of drop_blk's left sibling */
			error = xfs_da3_node_read(args->trans, args->dp,
						  be32_to_cpu(drop_info->back),
						  &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						  sizeof(*tmp_info) - 1);
		}
	} else {
		/* drop_blk sits to the right of save_blk */
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			/* fix the back pointer of drop_blk's right sibling */
			error = xfs_da3_node_read(args->trans, args->dp,
						  be32_to_cpu(drop_info->forw),
						  &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						  sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 *
 * @forward: nonzero to shift toward higher hashvals, zero toward lower.
 * @release: nonzero to brelse the old buffers as they are replaced.
 * @result:  set to 0 on a successful shift, -ENOENT when we ran off the
 *           edge of the tree.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active - 1) - 1;	/* skip bottom layer in path */
	for (; level >= 0; level--) {
		blk = &path->blk[level];
		xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
					   blk->bp->b_addr);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		/* every level was at its edge: no sibling in that direction */
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't set
		 * a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
						   bp->b_addr);
			btree = nodehdr.btree;
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			/* enter the subtree at its near edge */
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active - 1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
							      blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
/*========================================================================
* Utility routines .
* = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = */
/*
* Implement a simple hash on a character string .
* Rotate the hash value by 7 bits , then XOR each character in .
* This is implemented with some source - level loop unrolling .
*/
xfs_dahash_t
2017-06-16 11:00:05 -07:00
xfs_da_hashname ( const uint8_t * name , int namelen )
2005-04-16 15:20:36 -07:00
{
xfs_dahash_t hash ;
/*
* Do four characters at a time as long as we can .
*/
for ( hash = 0 ; namelen > = 4 ; namelen - = 4 , name + = 4 )
hash = ( name [ 0 ] < < 21 ) ^ ( name [ 1 ] < < 14 ) ^ ( name [ 2 ] < < 7 ) ^
( name [ 3 ] < < 0 ) ^ rol32 ( hash , 7 * 4 ) ;
/*
* Now do the rest of the characters .
*/
switch ( namelen ) {
case 3 :
return ( name [ 0 ] < < 14 ) ^ ( name [ 1 ] < < 7 ) ^ ( name [ 2 ] < < 0 ) ^
rol32 ( hash , 7 * 3 ) ;
case 2 :
return ( name [ 0 ] < < 7 ) ^ ( name [ 1 ] < < 0 ) ^ rol32 ( hash , 7 * 2 ) ;
case 1 :
return ( name [ 0 ] < < 0 ) ^ rol32 ( hash , 7 * 1 ) ;
2005-11-02 15:12:28 +11:00
default : /* case 0: */
2005-04-16 15:20:36 -07:00
return hash ;
}
}
2008-05-21 16:41:01 +10:00
enum xfs_dacmp
xfs_da_compname (
struct xfs_da_args * args ,
2010-01-20 10:47:17 +11:00
const unsigned char * name ,
int len )
2008-05-21 16:41:01 +10:00
{
return ( args - > namelen = = len & & memcmp ( args - > name , name , len ) = = 0 ) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT ;
}
2005-04-16 15:20:36 -07:00
/*
 * Allocate 'count' new da-btree blocks at file offset *bno in the given
 * fork, first as one contiguous extent and, failing that, as up to
 * 'count' fragments.  Returns 0 on success, -ENOSPC if the full range
 * could not be mapped, or another negative errno.
 */
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	/* snapshot block count so we can charge the delta below */
	xfs_rfsblock_t		nblks = dp->i_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->total, &map, &nmap);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, 0);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = min(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);	/* blocks still needed */
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->total, &mapp[mapi], &nmap);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 * NOTE(review): when mapi == 0, mapp is NULL but got != count
	 * short-circuits before mapp[0] is dereferenced (assumes count > 0
	 * -- TODO confirm callers never pass count == 0).
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (error)
		return error;

	*new_blkno = (xfs_dablk_t)bno;
	return 0;
}
/*
* Ick . We need to always be able to remove a btree block , even
* if there ' s no space reservation because the filesystem is full .
* This is called if xfs_bunmapi on a btree block fails due to ENOSPC .
* It swaps the target block with the last block in the file . The
* last block in the file can always be removed since it can ' t cause
* a bmap btree split to do that .
*/
STATIC int
2013-04-24 18:58:02 +10:00
xfs_da3_swap_lastblock (
struct xfs_da_args * args ,
xfs_dablk_t * dead_blknop ,
struct xfs_buf * * dead_bufp )
2005-04-16 15:20:36 -07:00
{
2013-04-24 18:58:02 +10:00
struct xfs_da_blkinfo * dead_info ;
struct xfs_da_blkinfo * sib_info ;
struct xfs_da_intnode * par_node ;
struct xfs_da_intnode * dead_node ;
struct xfs_dir2_leaf * dead_leaf2 ;
struct xfs_da_node_entry * btree ;
struct xfs_da3_icnode_hdr par_hdr ;
2013-10-29 22:11:51 +11:00
struct xfs_inode * dp ;
2013-04-24 18:58:02 +10:00
struct xfs_trans * tp ;
struct xfs_mount * mp ;
struct xfs_buf * dead_buf ;
struct xfs_buf * last_buf ;
struct xfs_buf * sib_buf ;
struct xfs_buf * par_buf ;
xfs_dahash_t dead_hash ;
xfs_fileoff_t lastoff ;
xfs_dablk_t dead_blkno ;
xfs_dablk_t last_blkno ;
xfs_dablk_t sib_blkno ;
xfs_dablk_t par_blkno ;
int error ;
int w ;
int entno ;
int level ;
int dead_level ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_swap_lastblock ( args ) ;
2005-04-16 15:20:36 -07:00
dead_buf = * dead_bufp ;
dead_blkno = * dead_blknop ;
tp = args - > trans ;
2013-10-29 22:11:51 +11:00
dp = args - > dp ;
2005-04-16 15:20:36 -07:00
w = args - > whichfork ;
ASSERT ( w = = XFS_DATA_FORK ) ;
2013-10-29 22:11:51 +11:00
mp = dp - > i_mount ;
2014-06-06 15:11:18 +10:00
lastoff = args - > geo - > freeblk ;
2013-10-29 22:11:51 +11:00
error = xfs_bmap_last_before ( tp , dp , & lastoff , w ) ;
2005-04-16 15:20:36 -07:00
if ( error )
return error ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp , lastoff = = 0 ) )
2014-06-25 14:58:08 +10:00
return - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
/*
* Read the last block in the btree space .
*/
2014-06-06 15:14:11 +10:00
last_blkno = ( xfs_dablk_t ) lastoff - args - > geo - > fsbcount ;
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( tp , dp , last_blkno , & last_buf , w ) ;
2012-11-12 22:54:10 +11:00
if ( error )
2005-04-16 15:20:36 -07:00
return error ;
/*
* Copy the last block into the dead buffer and log it .
*/
2014-06-06 15:15:59 +10:00
memcpy ( dead_buf - > b_addr , last_buf - > b_addr , args - > geo - > blksize ) ;
xfs_trans_log_buf ( tp , dead_buf , 0 , args - > geo - > blksize - 1 ) ;
2012-06-22 18:50:14 +10:00
dead_info = dead_buf - > b_addr ;
2005-04-16 15:20:36 -07:00
/*
* Get values from the moved block .
*/
2013-04-12 07:30:21 +10:00
if ( dead_info - > magic = = cpu_to_be16 ( XFS_DIR2_LEAFN_MAGIC ) | |
dead_info - > magic = = cpu_to_be16 ( XFS_DIR3_LEAFN_MAGIC ) ) {
struct xfs_dir3_icleaf_hdr leafhdr ;
struct xfs_dir2_leaf_entry * ents ;
2005-04-16 15:20:36 -07:00
dead_leaf2 = ( xfs_dir2_leaf_t * ) dead_info ;
2019-11-08 14:57:49 -08:00
xfs_dir2_leaf_hdr_from_disk ( dp - > i_mount , & leafhdr ,
dead_leaf2 ) ;
2019-11-08 14:57:50 -08:00
ents = leafhdr . ents ;
2005-04-16 15:20:36 -07:00
dead_level = 0 ;
2013-04-12 07:30:21 +10:00
dead_hash = be32_to_cpu ( ents [ leafhdr . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
} else {
2013-04-24 18:58:02 +10:00
struct xfs_da3_icnode_hdr deadhdr ;
2005-04-16 15:20:36 -07:00
dead_node = ( xfs_da_intnode_t * ) dead_info ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & deadhdr , dead_node ) ;
2019-11-08 14:57:48 -08:00
btree = deadhdr . btree ;
2013-04-24 18:58:02 +10:00
dead_level = deadhdr . level ;
dead_hash = be32_to_cpu ( btree [ deadhdr . count - 1 ] . hashval ) ;
2005-04-16 15:20:36 -07:00
}
sib_buf = par_buf = NULL ;
/*
* If the moved block has a left sibling , fix up the pointers .
*/
2006-03-17 17:28:40 +11:00
if ( ( sib_blkno = be32_to_cpu ( dead_info - > back ) ) ) {
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( tp , dp , sib_blkno , & sib_buf , w ) ;
2012-11-12 22:54:10 +11:00
if ( error )
2005-04-16 15:20:36 -07:00
goto done ;
2012-06-22 18:50:14 +10:00
sib_info = sib_buf - > b_addr ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp ,
be32_to_cpu ( sib_info - > forw ) ! = last_blkno | |
sib_info - > magic ! = dead_info - > magic ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2006-03-17 17:28:40 +11:00
sib_info - > forw = cpu_to_be32 ( dead_blkno ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , sib_buf ,
2005-04-16 15:20:36 -07:00
XFS_DA_LOGRANGE ( sib_info , & sib_info - > forw ,
sizeof ( sib_info - > forw ) ) ) ;
sib_buf = NULL ;
}
/*
* If the moved block has a right sibling , fix up the pointers .
*/
2006-03-17 17:28:40 +11:00
if ( ( sib_blkno = be32_to_cpu ( dead_info - > forw ) ) ) {
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( tp , dp , sib_blkno , & sib_buf , w ) ;
2012-11-12 22:54:10 +11:00
if ( error )
2005-04-16 15:20:36 -07:00
goto done ;
2012-06-22 18:50:14 +10:00
sib_info = sib_buf - > b_addr ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp ,
be32_to_cpu ( sib_info - > back ) ! = last_blkno | |
sib_info - > magic ! = dead_info - > magic ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2006-03-17 17:28:40 +11:00
sib_info - > back = cpu_to_be32 ( dead_blkno ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , sib_buf ,
2005-04-16 15:20:36 -07:00
XFS_DA_LOGRANGE ( sib_info , & sib_info - > back ,
sizeof ( sib_info - > back ) ) ) ;
sib_buf = NULL ;
}
2014-06-06 15:11:18 +10:00
par_blkno = args - > geo - > leafblk ;
2005-04-16 15:20:36 -07:00
level = - 1 ;
/*
* Walk down the tree looking for the parent of the moved block .
*/
for ( ; ; ) {
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( tp , dp , par_blkno , & par_buf , w ) ;
2012-11-12 22:54:10 +11:00
if ( error )
2005-04-16 15:20:36 -07:00
goto done ;
2012-06-22 18:50:14 +10:00
par_node = par_buf - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & par_hdr , par_node ) ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp ,
level > = 0 & & level ! = par_hdr . level + 1 ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2013-04-24 18:58:02 +10:00
level = par_hdr . level ;
2019-11-08 14:57:48 -08:00
btree = par_hdr . btree ;
2005-04-16 15:20:36 -07:00
for ( entno = 0 ;
2013-04-24 18:58:02 +10:00
entno < par_hdr . count & &
be32_to_cpu ( btree [ entno ] . hashval ) < dead_hash ;
2005-04-16 15:20:36 -07:00
entno + + )
continue ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp , entno = = par_hdr . count ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2013-04-24 18:58:02 +10:00
par_blkno = be32_to_cpu ( btree [ entno ] . before ) ;
2005-04-16 15:20:36 -07:00
if ( level = = dead_level + 1 )
break ;
2012-06-22 18:50:14 +10:00
xfs_trans_brelse ( tp , par_buf ) ;
2005-04-16 15:20:36 -07:00
par_buf = NULL ;
}
/*
* We ' re in the right parent block .
* Look for the right entry .
*/
for ( ; ; ) {
for ( ;
2013-04-24 18:58:02 +10:00
entno < par_hdr . count & &
be32_to_cpu ( btree [ entno ] . before ) ! = last_blkno ;
2005-04-16 15:20:36 -07:00
entno + + )
continue ;
2013-04-24 18:58:02 +10:00
if ( entno < par_hdr . count )
2005-04-16 15:20:36 -07:00
break ;
2013-04-24 18:58:02 +10:00
par_blkno = par_hdr . forw ;
2012-06-22 18:50:14 +10:00
xfs_trans_brelse ( tp , par_buf ) ;
2005-04-16 15:20:36 -07:00
par_buf = NULL ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp , par_blkno = = 0 ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2019-11-20 09:46:04 -08:00
error = xfs_da3_node_read ( tp , dp , par_blkno , & par_buf , w ) ;
2012-11-12 22:54:10 +11:00
if ( error )
2005-04-16 15:20:36 -07:00
goto done ;
2012-06-22 18:50:14 +10:00
par_node = par_buf - > b_addr ;
2019-11-08 14:53:00 -08:00
xfs_da3_node_hdr_from_disk ( dp - > i_mount , & par_hdr , par_node ) ;
2019-11-11 12:53:22 -08:00
if ( XFS_IS_CORRUPT ( mp , par_hdr . level ! = level ) ) {
2014-06-25 14:58:08 +10:00
error = - EFSCORRUPTED ;
2005-04-16 15:20:36 -07:00
goto done ;
}
2019-11-08 14:57:48 -08:00
btree = par_hdr . btree ;
2005-04-16 15:20:36 -07:00
entno = 0 ;
}
/*
* Update the parent entry pointing to the moved block .
*/
2013-04-24 18:58:02 +10:00
btree [ entno ] . before = cpu_to_be32 ( dead_blkno ) ;
2012-06-22 18:50:14 +10:00
xfs_trans_log_buf ( tp , par_buf ,
2013-04-24 18:58:02 +10:00
XFS_DA_LOGRANGE ( par_node , & btree [ entno ] . before ,
sizeof ( btree [ entno ] . before ) ) ) ;
2005-04-16 15:20:36 -07:00
* dead_blknop = last_blkno ;
* dead_bufp = last_buf ;
return 0 ;
done :
if ( par_buf )
2012-06-22 18:50:14 +10:00
xfs_trans_brelse ( tp , par_buf ) ;
2005-04-16 15:20:36 -07:00
if ( sib_buf )
2012-06-22 18:50:14 +10:00
xfs_trans_brelse ( tp , sib_buf ) ;
xfs_trans_brelse ( tp , last_buf ) ;
2005-04-16 15:20:36 -07:00
return error ;
}
/*
* Remove a btree block from a directory or attribute .
*/
int
2012-06-22 18:50:14 +10:00
xfs_da_shrink_inode (
2018-07-11 22:26:11 -07:00
struct xfs_da_args * args ,
xfs_dablk_t dead_blkno ,
struct xfs_buf * dead_buf )
2005-04-16 15:20:36 -07:00
{
2018-07-11 22:26:11 -07:00
struct xfs_inode * dp ;
int done , error , w , count ;
struct xfs_trans * tp ;
2005-04-16 15:20:36 -07:00
2012-03-22 05:15:13 +00:00
trace_xfs_da_shrink_inode ( args ) ;
2005-04-16 15:20:36 -07:00
dp = args - > dp ;
w = args - > whichfork ;
tp = args - > trans ;
2014-06-06 15:14:11 +10:00
count = args - > geo - > fsbcount ;
2005-04-16 15:20:36 -07:00
for ( ; ; ) {
/*
* Remove extents . If we get ENOSPC for a dir we have to move
* the last block to the place we want to kill .
*/
2013-04-24 18:58:02 +10:00
error = xfs_bunmapi ( tp , dp , dead_blkno , count ,
2018-07-11 22:26:25 -07:00
xfs_bmapi_aflag ( w ) , 0 , & done ) ;
2014-06-25 14:58:08 +10:00
if ( error = = - ENOSPC ) {
2005-04-16 15:20:36 -07:00
if ( w ! = XFS_DATA_FORK )
2006-06-20 13:04:51 +10:00
break ;
2013-04-24 18:58:02 +10:00
error = xfs_da3_swap_lastblock ( args , & dead_blkno ,
& dead_buf ) ;
if ( error )
2006-06-20 13:04:51 +10:00
break ;
} else {
2005-04-16 15:20:36 -07:00
break ;
}
}
2012-06-22 18:50:14 +10:00
xfs_trans_binval ( tp , dead_buf ) ;
2005-04-16 15:20:36 -07:00
return error ;
}
2012-06-22 18:50:13 +10:00
/*
 * Map a dir/attr logical block number to the daddr-space buffer map(s) needed
 * to read it.
 *
 * On entry *mapp points at a caller-provided single-entry map; if the block
 * turns out to be made of multiple discontiguous extents, a larger map array
 * is allocated and returned through *mapp, and the caller must free it.
 * *nmaps is set to the number of valid entries, or 0 when there is no
 * mapping and the caller passed XFS_DABUF_MAP_HOLE_OK.  Returns 0 or a
 * negative errno (-EFSCORRUPTED for an unexpected hole/delalloc/short map).
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	int			whichfork,
	struct xfs_buf_map	**mapp,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb = xfs_dabuf_nfsb(mp, whichfork);
	struct xfs_bmbt_irec	irec, *irecs = &irec;
	struct xfs_buf_map	*map = *mapp;
	xfs_fileoff_t		off = bno;
	int			error = 0, nirecs, i;

	/* Multi-fsb blocks may need more than the on-stack irec. */
	if (nfsb > 1)
		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);

	nirecs = nfsb;
	error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
			xfs_bmapi_aflag(whichfork));
	if (error)
		goto out_free_irecs;

	/*
	 * Use the caller provided map for the single map case, else allocate a
	 * larger one that needs to be freed by the caller.
	 */
	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
		if (!map) {
			error = -ENOMEM;
			goto out_free_irecs;
		}
		*mapp = map;
	}

	/*
	 * Translate each extent to a daddr/length pair, verifying that the
	 * extents are real (no holes or delalloc) and exactly contiguous in
	 * file offset space.
	 */
	for (i = 0; i < nirecs; i++) {
		if (irecs[i].br_startblock == HOLESTARTBLOCK ||
		    irecs[i].br_startblock == DELAYSTARTBLOCK)
			goto invalid_mapping;
		if (off != irecs[i].br_startoff)
			goto invalid_mapping;

		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
		off += irecs[i].br_blockcount;
	}

	/* The extents must cover the whole da block, no short maps. */
	if (off != bno + nfsb)
		goto invalid_mapping;

	*nmaps = nirecs;
out_free_irecs:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;

invalid_mapping:
	/* Caller ok with no mapping. */
	if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
		error = -EFSCORRUPTED;
		if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
			xfs_alert(mp, "%s: bno %u inode %llu",
					__func__, bno, dp->i_ino);

			for (i = 0; i < nirecs; i++) {
				xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
					i, irecs[i].br_startoff,
					irecs[i].br_startblock,
					irecs[i].br_blockcount,
					irecs[i].br_state);
			}
		}
	} else {
		*nmaps = 0;
	}
	goto out_free_irecs;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf_map	single_map;
	struct xfs_buf_map	*maps = &single_map;
	int			nmaps = 1;
	struct xfs_buf		*bp;
	int			error;

	*bpp = NULL;

	/* Resolve the da block to one or more disk mappings. */
	error = xfs_dabuf_map(dp, bno, 0, whichfork, &maps, &nmaps);
	if (error || !nmaps)
		goto out_free;

	error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, maps, nmaps, 0,
			&bp);
	if (!error)
		*bpp = bp;

out_free:
	/* xfs_dabuf_map may have allocated a larger map array. */
	if (maps != &single_map)
		kmem_free(maps);
	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	unsigned int		flags,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf_map	single_map;
	struct xfs_buf_map	*maps = &single_map;
	int			nmaps = 1;
	struct xfs_buf		*bp;
	int			error;

	*bpp = NULL;

	/* Resolve the da block to one or more disk mappings. */
	error = xfs_dabuf_map(dp, bno, flags, whichfork, &maps, &nmaps);
	if (error || !nmaps)
		goto out_free;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, maps, nmaps,
			0, &bp, ops);
	if (error)
		goto out_free;

	/* Tag the buffer with a reference hint appropriate to its fork. */
	xfs_buf_set_ref(bp, whichfork == XFS_ATTR_FORK ?
			XFS_ATTR_BTREE_REF : XFS_DIR_BTREE_REF);
	*bpp = bp;

out_free:
	/* xfs_dabuf_map may have allocated a larger map array. */
	if (maps != &single_map)
		kmem_free(maps);
	return error;
}
/*
* Readahead the dir / attr block .
*/
2017-02-02 15:13:58 -08:00
int
2005-04-16 15:20:36 -07:00
xfs_da_reada_buf (
2012-06-22 18:50:13 +10:00
struct xfs_inode * dp ,
xfs_dablk_t bno ,
2019-11-20 09:46:02 -08:00
unsigned int flags ,
2012-11-12 22:54:10 +11:00
int whichfork ,
2012-11-14 17:54:40 +11:00
const struct xfs_buf_ops * ops )
2005-04-16 15:20:36 -07:00
{
2012-06-22 18:50:13 +10:00
struct xfs_buf_map map ;
struct xfs_buf_map * mapp ;
int nmap ;
int error ;
mapp = & map ;
nmap = 1 ;
2019-11-20 09:46:02 -08:00
error = xfs_dabuf_map ( dp , bno , flags , whichfork , & mapp , & nmap ) ;
2019-11-20 10:18:50 -08:00
if ( error | | ! nmap )
2012-06-22 18:50:13 +10:00
goto out_free ;
2005-04-16 15:20:36 -07:00
2012-11-14 17:54:40 +11:00
xfs_buf_readahead_map ( dp - > i_mount - > m_ddev_targp , mapp , nmap , ops ) ;
2012-06-22 18:50:13 +10:00
out_free :
if ( mapp ! = & map )
kmem_free ( mapp ) ;
2017-02-02 15:13:58 -08:00
return error ;
2005-04-16 15:20:36 -07:00
}