// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"

static struct kmem_cache	*xfs_inobt_cur_cache;

STATIC int
xfs_inobt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mnr[level != 0];
}

STATIC struct xfs_btree_cur *
xfs_inobt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_inobt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}

STATIC void
xfs_inobt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*nptr,
	int				inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_root = nptr->s;
	be32_add_cpu(&agi->agi_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_ROOT | XFS_AGI_LEVEL);
}

STATIC void
xfs_finobt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*nptr,
	int				inc)	/* level change */
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	agi->agi_free_root = nptr->s;
	be32_add_cpu(&agi->agi_free_level, inc);
	xfs_ialloc_log_agi(cur->bc_tp, agbp,
			   XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL);
}

/* Update the inode btree block counter for this btree. */
static inline void
xfs_inobt_mod_blockcount(
	struct xfs_btree_cur	*cur,
	int			howmuch)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agi		*agi = agbp->b_addr;

	if (!xfs_has_inobtcounts(cur->bc_mp))
		return;

	if (cur->bc_btnum == XFS_BTNUM_FINO)
		be32_add_cpu(&agi->agi_fblocks, howmuch);
	else if (cur->bc_btnum == XFS_BTNUM_INO)
		be32_add_cpu(&agi->agi_iblocks, howmuch);
	xfs_ialloc_log_agi(cur->bc_tp, agbp, XFS_AGI_IBLOCKS);
}

STATIC int
__xfs_inobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat,
	enum xfs_ag_resv_type		resv)
{
	xfs_alloc_arg_t		args;		/* block allocation args */
	int			error;		/* error return value */
	xfs_agblock_t		sbno = be32_to_cpu(start->s);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.oinfo = XFS_RMAP_OINFO_INOBT;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;

	if (args.fsbno == NULLFSBLOCK) {
		*stat = 0;
		return 0;
	}
	ASSERT(args.len == 1);

	new->s = cpu_to_be32(XFS_FSB_TO_AGBNO(args.mp, args.fsbno));
	*stat = 1;
	xfs_inobt_mod_blockcount(cur, 1);
	return 0;
}

STATIC int
xfs_inobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_alloc_block(cur, start, new, stat);
	return __xfs_inobt_alloc_block(cur, start, new, stat,
			XFS_AG_RESV_METADATA);
}

STATIC int
__xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp,
	enum xfs_ag_resv_type	resv)
{
	xfs_inobt_mod_blockcount(cur, -1);
	return xfs_free_extent(cur->bc_tp,
			XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)), 1,
			&XFS_RMAP_OINFO_INOBT, resv);
}

STATIC int
xfs_inobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_NONE);
}

STATIC int
xfs_finobt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	if (cur->bc_mp->m_finobt_nores)
		return xfs_inobt_free_block(cur, bp);
	return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
}

STATIC int
xfs_inobt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return M_IGEO(cur->bc_mp)->inobt_mxr[level != 0];
}

STATIC void
xfs_inobt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->inobt.ir_startino = rec->inobt.ir_startino;
}
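
/*
 * The high key of a record is the largest key the record covers: with
 * XFS_INODES_PER_CHUNK == 64, a record with ir_startino 128 spans inodes
 * 128-191, so its high key is 191.
 */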

STATIC void
xfs_inobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->inobt.ir_startino);
	x += XFS_INODES_PER_CHUNK - 1;
	key->inobt.ir_startino = cpu_to_be32(x);
}

STATIC void
xfs_inobt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->inobt.ir_startino = cpu_to_be32(cur->bc_rec.i.ir_startino);
	if (xfs_has_sparseinodes(cur->bc_mp)) {
		rec->inobt.ir_u.sp.ir_holemask =
					cpu_to_be16(cur->bc_rec.i.ir_holemask);
		rec->inobt.ir_u.sp.ir_count = cur->bc_rec.i.ir_count;
		rec->inobt.ir_u.sp.ir_freecount = cur->bc_rec.i.ir_freecount;
	} else {
		/* ir_holemask/ir_count not supported on-disk */
		rec->inobt.ir_u.f.ir_freecount =
					cpu_to_be32(cur->bc_rec.i.ir_freecount);
	}
	rec->inobt.ir_free = cpu_to_be64(cur->bc_rec.i.ir_free);
}

/*
 * Initial value of ptr for lookup.
 */
STATIC void
xfs_inobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));

	ptr->s = agi->agi_root;
}

STATIC void
xfs_finobt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agi		*agi = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));
	ptr->s = agi->agi_free_root;
}

STATIC int64_t
xfs_inobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	return (int64_t)be32_to_cpu(key->inobt.ir_startino) -
			cur->bc_rec.i.ir_startino;
}

STATIC int64_t
xfs_inobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
			be32_to_cpu(k2->inobt.ir_startino);
}

static xfs_failaddr_t
xfs_inobt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	/*
	 * During growfs operations, we can't verify the exact owner as the
	 * perag is not fully initialised and hence not attached to the buffer.
	 *
	 * Similarly, during log recovery we will have a perag structure
	 * attached, but the agi information will not yet have been initialised
	 * from the on disk AGI. We don't currently use any of this information,
	 * but beware of the landmine (i.e. need to check pag->pagi_init) if we
	 * ever do.
	 */
	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/* level verification */
	level = be16_to_cpu(block->bb_level);
	if (level >= M_IGEO(mp)->inobt_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp,
			M_IGEO(mp)->inobt_mxr[level != 0]);
}

static void
xfs_inobt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_inobt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}

static void
xfs_inobt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_inobt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_inobt_buf_ops = {
	.name = "xfs_inobt",
	.magic = { cpu_to_be32(XFS_IBT_MAGIC), cpu_to_be32(XFS_IBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

const struct xfs_buf_ops xfs_finobt_buf_ops = {
	.name = "xfs_finobt",
	.magic = { cpu_to_be32(XFS_FIBT_MAGIC),
		   cpu_to_be32(XFS_FIBT_CRC_MAGIC) },
	.verify_read = xfs_inobt_read_verify,
	.verify_write = xfs_inobt_write_verify,
	.verify_struct = xfs_inobt_verify,
};

STATIC int
xfs_inobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->inobt.ir_startino) <
		be32_to_cpu(k2->inobt.ir_startino);
}

STATIC int
xfs_inobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->inobt.ir_startino) + XFS_INODES_PER_CHUNK <=
		be32_to_cpu(r2->inobt.ir_startino);
}

static const struct xfs_btree_ops xfs_inobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_inobt_set_root,
	.alloc_block		= xfs_inobt_alloc_block,
	.free_block		= xfs_inobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_inobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_inobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_finobt_ops = {
	.rec_len		= sizeof(xfs_inobt_rec_t),
	.key_len		= sizeof(xfs_inobt_key_t),

	.dup_cursor		= xfs_inobt_dup_cursor,
	.set_root		= xfs_finobt_set_root,
	.alloc_block		= xfs_finobt_alloc_block,
	.free_block		= xfs_finobt_free_block,
	.get_minrecs		= xfs_inobt_get_minrecs,
	.get_maxrecs		= xfs_inobt_get_maxrecs,
	.init_key_from_rec	= xfs_inobt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_inobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_inobt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_finobt_init_ptr_from_cur,
	.key_diff		= xfs_inobt_key_diff,
	.buf_ops		= &xfs_finobt_buf_ops,
	.diff_two_keys		= xfs_inobt_diff_two_keys,
	.keys_inorder		= xfs_inobt_keys_inorder,
	.recs_inorder		= xfs_inobt_recs_inorder,
};

/*
 * Initialize a new inode btree cursor.
 */
static struct xfs_btree_cur *
xfs_inobt_init_common(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)		/* ialloc or free ino btree */
{
	struct xfs_btree_cur	*cur;

	cur = xfs_btree_alloc_cursor(mp, tp, btnum,
			M_IGEO(mp)->inobt_maxlevels, xfs_inobt_cur_cache);
	if (btnum == XFS_BTNUM_INO) {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_ibt_2);
		cur->bc_ops = &xfs_inobt_ops;
	} else {
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_fibt_2);
		cur->bc_ops = &xfs_finobt_ops;
	}

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;
	return cur;
}

/* Create an inode btree cursor. */
struct xfs_btree_cur *
xfs_inobt_init_cursor(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agbp->b_addr;

	cur = xfs_inobt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_INO)
		cur->bc_nlevels = be32_to_cpu(agi->agi_level);
	else
		cur->bc_nlevels = be32_to_cpu(agi->agi_free_level);
	cur->bc_ag.agbp = agbp;
	return cur;
}

/* Create an inode btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_inobt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_inobt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new inobt btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_inobt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agi		*agi = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;
	int			fields;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	if (cur->bc_btnum == XFS_BTNUM_INO) {
		fields = XFS_AGI_ROOT | XFS_AGI_LEVEL;
		agi->agi_root = cpu_to_be32(afake->af_root);
		agi->agi_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_iblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_inobt_ops);
	} else {
		fields = XFS_AGI_FREE_ROOT | XFS_AGI_FREE_LEVEL;
		agi->agi_free_root = cpu_to_be32(afake->af_root);
		agi->agi_free_level = cpu_to_be32(afake->af_levels);
		if (xfs_has_inobtcounts(cur->bc_mp)) {
			agi->agi_fblocks = cpu_to_be32(afake->af_blocks);
			fields |= XFS_AGI_IBLOCKS;
		}
		xfs_ialloc_log_agi(tp, agbp, fields);
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_finobt_ops);
	}
}

/* Calculate number of records in an inode btree block. */
static inline unsigned int
xfs_inobt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_inobt_rec_t);
	return blocklen / (sizeof(xfs_inobt_key_t) + sizeof(xfs_inobt_ptr_t));
}
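
/*
 * Worked example (an illustration, not exhaustive): on a V5 filesystem with
 * 4096-byte blocks, the short-format btree block header takes 56 bytes,
 * leaving 4040 bytes of payload.  A leaf then holds 4040 / 16 = 252 records
 * and a node holds 4040 / (4 + 4) = 505 key/pointer pairs, since
 * xfs_inobt_rec_t is 16 bytes and both xfs_inobt_key_t and xfs_inobt_ptr_t
 * are 4 bytes.
 */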

/*
 * Calculate number of records in an inobt btree block.
 */
int
xfs_inobt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_INOBT_BLOCK_LEN(mp);
	return xfs_inobt_block_maxrecs(blocklen, leaf);
}

/*
 * Maximum number of inode btree records per AG.  Pretend that we can fill an
 * entire AG completely full of inodes except for the AG headers.
 */
#define XFS_MAX_INODE_RECORDS \
	((XFS_MAX_AG_BYTES - (4 * BBSIZE)) / XFS_DINODE_MIN_SIZE) / \
			XFS_INODES_PER_CHUNK
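
/*
 * Rough arithmetic behind the bound above (assuming the usual constants:
 * XFS_MAX_AG_BYTES = 1TiB, BBSIZE = 512, XFS_DINODE_MIN_SIZE = 256,
 * XFS_INODES_PER_CHUNK = 64): a 1TiB AG minus four sector-sized headers
 * holds at most about 2^40 / 256 = 2^32 inodes, i.e. 2^32 / 64 = 2^26
 * (roughly 67 million) inode records.
 */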

/* Compute the max possible height for the inode btree. */
static inline unsigned int
xfs_inobt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}

/* Compute the max possible height for the free inode btree. */
static inline unsigned int
xfs_finobt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN;

	minrecs[0] = xfs_inobt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_inobt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_INODE_RECORDS);
}

/* Compute the max possible height for either inode btree. */
unsigned int
xfs_iallocbt_maxlevels_ondisk(void)
{
	return max(xfs_inobt_maxlevels_ondisk(),
		   xfs_finobt_maxlevels_ondisk());
}
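
/*
 * Illustration of the bound (assuming XFS_MIN_BLOCKSIZE = 512 and a 16-byte
 * short-format block header): the worst-case block leaves 512 - 16 = 496
 * bytes of payload, so a half-full leaf holds at least 496 / 16 / 2 = 15
 * records and a half-full node at least 496 / 8 / 2 = 31 keys.  By that
 * arithmetic, covering 2^26 records takes a six-level tree, and the cursor
 * cache set up at the bottom of this file is sized from this computation.
 */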

/*
 * Convert the inode record holemask to an inode allocation bitmap. The inode
 * allocation bitmap is inode granularity and specifies whether an inode is
 * physically allocated on disk (not whether the inode is considered allocated
 * or free by the fs).
 *
 * A bit value of 1 means the inode is allocated, a value of 0 means it is free.
 */
uint64_t
xfs_inobt_irec_to_allocmask(
	struct xfs_inobt_rec_incore	*rec)
{
	uint64_t			bitmap = 0;
	uint64_t			inodespbit;
	int				nextbit;
	uint				allocbitmap;

	/*
	 * The holemask has 16 bits for a 64 inode record. Therefore each
	 * holemask bit represents multiple inodes. Create a mask of bits to
	 * set in the allocmask for each holemask bit.
	 */
	inodespbit = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;

	/*
	 * Allocated inodes are represented by 0 bits in holemask. Invert the 0
	 * bits to 1 and convert to a uint so we can use xfs_next_bit(). Mask
	 * anything beyond the 16 holemask bits since this casts to a larger
	 * type.
	 */
	allocbitmap = ~rec->ir_holemask & ((1 << XFS_INOBT_HOLEMASK_BITS) - 1);

	/*
	 * allocbitmap is the inverted holemask so every set bit represents
	 * allocated inodes. To expand from 16-bit holemask granularity to
	 * 64-bit (e.g., bit-per-inode), set inodespbit bits in the target
	 * bitmap for every holemask bit.
	 */
	nextbit = xfs_next_bit(&allocbitmap, 1, 0);
	while (nextbit != -1) {
		ASSERT(nextbit < (sizeof(rec->ir_holemask) * NBBY));

		bitmap |= (inodespbit <<
			   (nextbit * XFS_INODES_PER_HOLEMASK_BIT));

		nextbit = xfs_next_bit(&allocbitmap, 1, nextbit + 1);
	}

	return bitmap;
}

#if defined(DEBUG) || defined(XFS_WARN)
/*
 * Verify that an in-core inode record has a valid inode count.
 */
int
xfs_inobt_rec_check_count(
	struct xfs_mount		*mp,
	struct xfs_inobt_rec_incore	*rec)
{
	int				inocount = 0;
	int				nextbit = 0;
	uint64_t			allocbmap;
	int				wordsz;

	wordsz = sizeof(allocbmap) / sizeof(unsigned int);
	allocbmap = xfs_inobt_irec_to_allocmask(rec);

	nextbit = xfs_next_bit((uint *)&allocbmap, wordsz, nextbit);
	while (nextbit != -1) {
		inocount++;
		nextbit = xfs_next_bit((uint *)&allocbmap, wordsz,
				       nextbit + 1);
	}

	if (inocount != rec->ir_count)
		return -EFSCORRUPTED;

	return 0;
}
#endif	/* DEBUG */

static xfs_extlen_t
xfs_inobt_max_size(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	xfs_agblock_t		agblocks = pag->block_count;

	/* Bail out if we're uninitialized, which can happen in mkfs. */
	if (M_IGEO(mp)->inobt_mxr[0] == 0)
		return 0;

	/*
	 * The log is permanently allocated, so the space it occupies will
	 * never be available for the kinds of things that would require btree
	 * expansion.  We therefore can pretend the space isn't there.
	 */
	if (xfs_ag_contains_log(mp, pag->pag_agno))
		agblocks -= mp->m_sb.sb_logblocks;

	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr,
				(uint64_t)agblocks * mp->m_sb.sb_inopblock /
					XFS_INODES_PER_CHUNK);
}

/* Read AGI and create inobt cursor. */
int
xfs_inobt_cur(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		which,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(*agi_bpp == NULL);
	ASSERT(*curpp == NULL);

	error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
	if (error)
		return error;

	cur = xfs_inobt_init_cursor(mp, tp, *agi_bpp, pag, which);
	*curpp = cur;
	return 0;
}

static int
xfs_inobt_count_blocks(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp = NULL;
	struct xfs_btree_cur	*cur = NULL;
	int			error;

	error = xfs_inobt_cur(mp, tp, pag, btnum, &cur, &agbp);
	if (error)
		return error;

	error = xfs_btree_count_blocks(cur, tree_blocks);
	xfs_btree_del_cursor(cur, error);
	xfs_trans_brelse(tp, agbp);

	return error;
}

/* Read finobt block count from AGI header. */
static int
xfs_finobt_read_blocks(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_extlen_t		*tree_blocks)
{
	struct xfs_buf		*agbp;
	struct xfs_agi		*agi;
	int			error;

	error = xfs_ialloc_read_agi(pag, tp, &agbp);
	if (error)
		return error;

	agi = agbp->b_addr;
	*tree_blocks = be32_to_cpu(agi->agi_fblocks);
	xfs_trans_brelse(tp, agbp);
	return 0;
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_finobt_calc_reserves(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_extlen_t		*ask,
	xfs_extlen_t		*used)
{
	xfs_extlen_t		tree_len = 0;
	int			error;

	if (!xfs_has_finobt(mp))
		return 0;

	if (xfs_has_inobtcounts(mp))
		error = xfs_finobt_read_blocks(pag, tp, &tree_len);
	else
		error = xfs_inobt_count_blocks(mp, tp, pag, XFS_BTNUM_FINO,
				&tree_len);
	if (error)
		return error;

	*ask += xfs_inobt_max_size(pag);
	*used += tree_len;
	return 0;
}

/* Calculate the inobt btree size for some records. */
xfs_extlen_t
xfs_iallocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(M_IGEO(mp)->inobt_mnr, len);
}

int __init
xfs_inobt_init_cur_cache(void)
{
	xfs_inobt_cur_cache = kmem_cache_create("xfs_inobt_cur",
			xfs_btree_cur_sizeof(xfs_inobt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_inobt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_inobt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_inobt_cur_cache);
	xfs_inobt_cur_cache = NULL;
}