// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_ag.h"
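
/* Create a duplicate cursor pointing at the same AGF buffer, perag and btree. */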
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
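
/* Point the AGF at a new btree root block, adjust the level, and log it. */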
STATIC void
xfs_allocbt_set_root(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_ptr       *ptr,
        int                             inc)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        struct xfs_agf          *agf = agbp->b_addr;
        int                     btnum = cur->bc_btnum;

        ASSERT(ptr->s != 0);

        agf->agf_roots[btnum] = ptr->s;
        be32_add_cpu(&agf->agf_levels[btnum], inc);
        cur->bc_ag.pag->pagf_levels[btnum] += inc;

        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
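
/* Allocate a block from the AG's free list to grow the btree. */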
STATIC int
xfs_allocbt_alloc_block(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_ptr       *start,
        union xfs_btree_ptr             *new,
        int                             *stat)
{
        int                     error;
        xfs_agblock_t           bno;

        /* Allocate the new block from the freelist. If we can't, give up. */
        error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_ag.agbp,
                                       &bno, 1);
        if (error)
                return error;

        if (bno == NULLAGBLOCK) {
                *stat = 0;
                return 0;
        }

        atomic64_inc(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.agbp->b_pag, bno, 1, false);

        new->s = cpu_to_be32(bno);
        *stat = 1;
        return 0;
}
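
/* Return a freed btree block to the AG's free list and mark the extent busy. */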
STATIC int
xfs_allocbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_buf          *agbp = cur->bc_ag.agbp;
        xfs_agblock_t           bno;
        int                     error;

        bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
        error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
        if (error)
                return error;

        atomic64_dec(&cur->bc_mp->m_allocbt_blks);
        xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
        struct xfs_btree_cur    *cur,
        struct xfs_btree_block  *block,
        union xfs_btree_rec     *rec,
        int                     ptr,
        int                     reason)
{
        struct xfs_agf          *agf = cur->bc_ag.agbp->b_addr;
        struct xfs_perag        *pag;
        __be32                  len;
        int                     numrecs;

        ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

        switch (reason) {
        case LASTREC_UPDATE:
                /*
                 * If this is the last leaf block and it's the last record,
                 * then update the size of the longest extent in the AG.
                 */
                if (ptr != xfs_btree_get_numrecs(block))
                        return;
                len = rec->alloc.ar_blockcount;
                break;
        case LASTREC_INSREC:
                if (be32_to_cpu(rec->alloc.ar_blockcount) <=
                    be32_to_cpu(agf->agf_longest))
                        return;
                len = rec->alloc.ar_blockcount;
                break;
        case LASTREC_DELREC:
                numrecs = xfs_btree_get_numrecs(block);
                if (ptr <= numrecs)
                        return;
                ASSERT(ptr == numrecs + 1);

                if (numrecs) {
                        xfs_alloc_rec_t *rrp;

                        rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
                        len = rrp->ar_blockcount;
                } else {
                        len = 0;
                }

                break;
        default:
                ASSERT(0);
                return;
        }

        agf->agf_longest = len;
        pag = cur->bc_ag.agbp->b_pag;
        pag->pagf_longest = be32_to_cpu(len);
        xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}
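
/* Return the minimum records per block for this level (leaf or node). */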
STATIC int
xfs_allocbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_alloc_mnr[level != 0];
}
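
/* Return the maximum records per block for this level (leaf or node). */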
STATIC int
xfs_allocbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_alloc_mxr[level != 0];
}
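
/* Initialize a key from an on-disk record. */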
STATIC void
xfs_allocbt_init_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        key->alloc.ar_startblock = rec->alloc.ar_startblock;
        key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}
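
/* The by-bno high key is the block number of the last block in the extent. */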
STATIC void
xfs_bnobt_init_high_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        __u32                           x;

        x = be32_to_cpu(rec->alloc.ar_startblock);
        x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
        key->alloc.ar_startblock = cpu_to_be32(x);
        key->alloc.ar_blockcount = 0;
}
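
/* The by-size high key is the extent length; the startblock is zeroed. */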
STATIC void
xfs_cntbt_init_high_key_from_rec(
        union xfs_btree_key             *key,
        const union xfs_btree_rec       *rec)
{
        key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
        key->alloc.ar_startblock = 0;
}
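
/* Write the cursor's in-core record out in on-disk (big-endian) format. */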
STATIC void
xfs_allocbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
        rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}
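
/* Fetch this btree's root pointer from the AGF. */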
STATIC void
xfs_allocbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agf          *agf = cur->bc_ag.agbp->b_addr;

        ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_roots[cur->bc_btnum];
}
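
/* Compare a by-bno key against the cursor's lookup record, by startblock. */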
STATIC int64_t
xfs_bnobt_key_diff(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *key)
{
        struct xfs_alloc_rec_incore     *rec = &cur->bc_rec.a;
        const struct xfs_alloc_rec      *kp = &key->alloc;

        return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
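
/* Compare a by-size key against the cursor's lookup record: length, then startblock. */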
STATIC int64_t
xfs_cntbt_key_diff(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *key)
{
        struct xfs_alloc_rec_incore     *rec = &cur->bc_rec.a;
        const struct xfs_alloc_rec      *kp = &key->alloc;
        int64_t                         diff;

        diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
        if (diff)
                return diff;

        return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
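
/* Compare two by-bno keys by startblock. */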
STATIC int64_t
xfs_bnobt_diff_two_keys(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
                        be32_to_cpu(k2->alloc.ar_startblock);
}
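
/* Compare two by-size keys: length first, then startblock. */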
STATIC int64_t
xfs_cntbt_diff_two_keys(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        int64_t                         diff;

        diff = be32_to_cpu(k1->alloc.ar_blockcount) -
               be32_to_cpu(k2->alloc.ar_blockcount);
        if (diff)
                return diff;

        return be32_to_cpu(k1->alloc.ar_startblock) -
               be32_to_cpu(k2->alloc.ar_startblock);
}
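
/* Sanity-check a free space btree block before it is trusted. */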
static xfs_failaddr_t
xfs_allocbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag        *pag = bp->b_pag;
        xfs_failaddr_t          fa;
        unsigned int            level;
        xfs_btnum_t             btnum = XFS_BTNUM_BNOi;

        if (!xfs_verify_magic(bp, block->bb_magic))
                return __this_address;

        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                fa = xfs_btree_sblock_v5hdr_verify(bp);
                if (fa)
                        return fa;
        }

        /*
         * The perag may not be attached during grow operations or fully
         * initialized from the AGF during log recovery. Therefore we can only
         * check against maximum tree depth from those contexts.
         *
         * Otherwise check against the per-tree limit. Peek at one of the
         * verifier magic values to determine the type of tree we're verifying
         * against.
         */
        level = be16_to_cpu(block->bb_level);
        if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
                btnum = XFS_BTNUM_CNTi;
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_levels[btnum])
                        return __this_address;
        } else if (level >= mp->m_ag_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
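
/* Read verifier: check the CRC and structure of a block read from disk. */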
static void
xfs_allocbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_allocbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}
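
/* Write verifier: re-check the block and compute its CRC before writeout. */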
static void
xfs_allocbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_allocbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
        .name                   = "xfs_bnobt",
        .magic                  = { cpu_to_be32(XFS_ABTB_MAGIC),
                                    cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
        .verify_read            = xfs_allocbt_read_verify,
        .verify_write           = xfs_allocbt_write_verify,
        .verify_struct          = xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
        .name                   = "xfs_cntbt",
        .magic                  = { cpu_to_be32(XFS_ABTC_MAGIC),
                                    cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
        .verify_read            = xfs_allocbt_read_verify,
        .verify_write           = xfs_allocbt_write_verify,
        .verify_struct          = xfs_allocbt_verify,
};
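
/* Check that two by-bno keys are in increasing startblock order. */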
STATIC int
xfs_bnobt_keys_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return be32_to_cpu(k1->alloc.ar_startblock) <
               be32_to_cpu(k2->alloc.ar_startblock);
}
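
/* Check that two by-bno records are in order and do not overlap. */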
STATIC int
xfs_bnobt_recs_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_rec       *r1,
        const union xfs_btree_rec       *r2)
{
        return be32_to_cpu(r1->alloc.ar_startblock) +
                be32_to_cpu(r1->alloc.ar_blockcount) <=
                be32_to_cpu(r2->alloc.ar_startblock);
}
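
/* Check that two by-size keys sort by (length, startblock). */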
STATIC int
xfs_cntbt_keys_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_key       *k1,
        const union xfs_btree_key       *k2)
{
        return be32_to_cpu(k1->alloc.ar_blockcount) <
                be32_to_cpu(k2->alloc.ar_blockcount) ||
                (k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
                 be32_to_cpu(k1->alloc.ar_startblock) <
                 be32_to_cpu(k2->alloc.ar_startblock));
}
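
/* Check that two by-size records sort by (length, startblock). */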
STATIC int
xfs_cntbt_recs_inorder(
        struct xfs_btree_cur            *cur,
        const union xfs_btree_rec       *r1,
        const union xfs_btree_rec       *r2)
{
        return be32_to_cpu(r1->alloc.ar_blockcount) <
                be32_to_cpu(r2->alloc.ar_blockcount) ||
                (r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
                 be32_to_cpu(r1->alloc.ar_startblock) <
                 be32_to_cpu(r2->alloc.ar_startblock));
}

static const struct xfs_btree_ops xfs_bnobt_ops = {
        .rec_len                = sizeof(xfs_alloc_rec_t),
        .key_len                = sizeof(xfs_alloc_key_t),

        .dup_cursor             = xfs_allocbt_dup_cursor,
        .set_root               = xfs_allocbt_set_root,
        .alloc_block            = xfs_allocbt_alloc_block,
        .free_block             = xfs_allocbt_free_block,
        .update_lastrec         = xfs_allocbt_update_lastrec,
        .get_minrecs            = xfs_allocbt_get_minrecs,
        .get_maxrecs            = xfs_allocbt_get_maxrecs,
        .init_key_from_rec      = xfs_allocbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_bnobt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_allocbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_allocbt_init_ptr_from_cur,
        .key_diff               = xfs_bnobt_key_diff,
        .buf_ops                = &xfs_bnobt_buf_ops,
        .diff_two_keys          = xfs_bnobt_diff_two_keys,
        .keys_inorder           = xfs_bnobt_keys_inorder,
        .recs_inorder           = xfs_bnobt_recs_inorder,
};

static const struct xfs_btree_ops xfs_cntbt_ops = {
        .rec_len                = sizeof(xfs_alloc_rec_t),
        .key_len                = sizeof(xfs_alloc_key_t),

        .dup_cursor             = xfs_allocbt_dup_cursor,
        .set_root               = xfs_allocbt_set_root,
        .alloc_block            = xfs_allocbt_alloc_block,
        .free_block             = xfs_allocbt_free_block,
        .update_lastrec         = xfs_allocbt_update_lastrec,
        .get_minrecs            = xfs_allocbt_get_minrecs,
        .get_maxrecs            = xfs_allocbt_get_maxrecs,
        .init_key_from_rec      = xfs_allocbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_cntbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_allocbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_allocbt_init_ptr_from_cur,
        .key_diff               = xfs_cntbt_key_diff,
        .buf_ops                = &xfs_cntbt_buf_ops,
        .diff_two_keys          = xfs_cntbt_diff_two_keys,
        .keys_inorder           = xfs_cntbt_keys_inorder,
        .recs_inorder           = xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)
{
        struct xfs_btree_cur    *cur;

        ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

        cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = btnum;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_ag.abt.active = false;

        if (btnum == XFS_BTNUM_CNT) {
                cur->bc_ops = &xfs_cntbt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
                cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
        } else {
                cur->bc_ops = &xfs_bnobt_ops;
                cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
        }

        /* take a reference for the cursor */
        atomic_inc(&pag->pag_ref);
        cur->bc_ag.pag = pag;

        if (xfs_sb_version_hascrc(&mp->m_sb))
                cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *                  /* new alloc btree cursor */
xfs_allocbt_init_cursor(
        struct xfs_mount        *mp,            /* file system mount point */
        struct xfs_trans        *tp,            /* transaction pointer */
        struct xfs_buf          *agbp,          /* buffer for agf structure */
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)          /* btree identifier */
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xfs_btree_cur    *cur;

        cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
        if (btnum == XFS_BTNUM_CNT)
                cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
        else
                cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

        cur->bc_ag.agbp = agbp;

        return cur;
}

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
        struct xfs_mount        *mp,
        struct xbtree_afakeroot *afake,
        struct xfs_perag        *pag,
        xfs_btnum_t             btnum)
{
        struct xfs_btree_cur    *cur;

        cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
        xfs_btree_stage_afakeroot(cur, afake);
        return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
        struct xfs_btree_cur    *cur,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp)
{
        struct xfs_agf          *agf = agbp->b_addr;
        struct xbtree_afakeroot *afake = cur->bc_ag.afake;

        ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

        agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
        agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
        xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

        if (cur->bc_btnum == XFS_BTNUM_BNO) {
                xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
        } else {
                cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
                xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
        }
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
        struct xfs_mount        *mp,
        int                     blocklen,
        int                     leaf)
{
        blocklen -= XFS_ALLOC_BLOCK_LEN(mp);

        if (leaf)
                return blocklen / sizeof(xfs_alloc_rec_t);
        return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}