// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
# include "xfs.h"
2005-11-02 06:38:42 +03:00
# include "xfs_fs.h"
2013-10-29 15:11:58 +04:00
# include "xfs_shared.h"
2013-10-23 03:51:50 +04:00
# include "xfs_format.h"
2013-10-23 03:50:10 +04:00
# include "xfs_log_format.h"
# include "xfs_trans_resv.h"
2005-04-17 02:20:36 +04:00
# include "xfs_mount.h"
# include "xfs_btree.h"
2020-03-11 20:52:49 +03:00
# include "xfs_btree_staging.h"
2013-10-23 03:51:50 +04:00
# include "xfs_alloc_btree.h"
2005-04-17 02:20:36 +04:00
# include "xfs_alloc.h"
2012-04-29 14:39:43 +04:00
# include "xfs_extent_busy.h"
2005-04-17 02:20:36 +04:00
# include "xfs_error.h"
2009-12-15 02:14:59 +03:00
# include "xfs_trace.h"
2013-10-23 03:50:10 +04:00
# include "xfs_trans.h"
2021-06-02 03:48:24 +03:00
# include "xfs_ag.h"

static struct kmem_cache	*xfs_allocbt_cur_cache;
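
/*
 * Duplicate an allocation btree cursor.  The copy points at the same AGF
 * buffer and perag as the original, so the caller can walk the tree without
 * disturbing the original cursor's position.
 */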
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	return xfs_allocbt_init_cursor(cur->bc_mp, cur->bc_tp,
			cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);
}
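
/*
 * Set the root block of the btree to @ptr.  The roots of both free space
 * btrees live in the AGF, so update the on-disk root, adjust the cached and
 * on-disk tree heights by @inc, and log the AGF.
 */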
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*ptr,
	int				inc)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	struct xfs_agf		*agf = agbp->b_addr;
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_ag.pag->pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
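
/* Allocate a new block for the btree from the AG free list (AGFL). */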
STATIC int
xfs_allocbt_alloc_block(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_ptr	*start,
	union xfs_btree_ptr		*new,
	int				*stat)
{
	int			error;
	xfs_agblock_t		bno;

	/* Allocate the new block from the freelist. If we can't, give up. */
	error = xfs_alloc_get_freelist(cur->bc_ag.pag, cur->bc_tp,
			cur->bc_ag.agbp, &bno, 1);
	if (error)
		return error;

	if (bno == NULLAGBLOCK) {
		*stat = 0;
		return 0;
	}

	atomic64_inc(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_reuse(cur->bc_mp, cur->bc_ag.pag, bno, 1, false);

	new->s = cpu_to_be32(bno);

	*stat = 1;
	return 0;
}
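
/* Return a freed btree block to the AGFL and mark the extent busy. */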
STATIC int
xfs_allocbt_free_block(
	struct xfs_btree_cur	*cur,
	struct xfs_buf		*bp)
{
	struct xfs_buf		*agbp = cur->bc_ag.agbp;
	xfs_agblock_t		bno;
	int			error;

	bno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp));
	error = xfs_alloc_put_freelist(cur->bc_ag.pag, cur->bc_tp, agbp, NULL,
			bno, 1);
	if (error)
		return error;

	atomic64_dec(&cur->bc_mp->m_allocbt_blks);
	xfs_extent_busy_insert(cur->bc_tp, agbp->b_pag, bno, 1,
			      XFS_EXTENT_BUSY_SKIP_DISCARD);
	return 0;
}

/*
 * Update the longest extent in the AGF
 */
STATIC void
xfs_allocbt_update_lastrec(
	struct xfs_btree_cur		*cur,
	const struct xfs_btree_block	*block,
	const union xfs_btree_rec	*rec,
	int				ptr,
	int				reason)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;
	struct xfs_perag	*pag;
	__be32			len;
	int			numrecs;

	ASSERT(cur->bc_btnum == XFS_BTNUM_CNT);

	switch (reason) {
	case LASTREC_UPDATE:
		/*
		 * If this is the last leaf block and it's the last record,
		 * then update the size of the longest extent in the AG.
		 */
		if (ptr != xfs_btree_get_numrecs(block))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_INSREC:
		if (be32_to_cpu(rec->alloc.ar_blockcount) <=
		    be32_to_cpu(agf->agf_longest))
			return;
		len = rec->alloc.ar_blockcount;
		break;
	case LASTREC_DELREC:
		numrecs = xfs_btree_get_numrecs(block);
		if (ptr <= numrecs)
			return;
		ASSERT(ptr == numrecs + 1);

		if (numrecs) {
			xfs_alloc_rec_t *rrp;

			rrp = XFS_ALLOC_REC_ADDR(cur->bc_mp, block, numrecs);
			len = rrp->ar_blockcount;
		} else {
			len = 0;
		}

		break;
	default:
		ASSERT(0);
		return;
	}

	agf->agf_longest = len;
	pag = cur->bc_ag.agbp->b_pag;
	pag->pagf_longest = be32_to_cpu(len);
	xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST);
}
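
/* Minimum number of records in a leaf (level 0) or node block. */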
STATIC int
xfs_allocbt_get_minrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mnr[level != 0];
}
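
/* Maximum number of records in a leaf (level 0) or node block. */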
STATIC int
xfs_allocbt_get_maxrecs(
	struct xfs_btree_cur	*cur,
	int			level)
{
	return cur->bc_mp->m_alloc_mxr[level != 0];
}
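
/* Construct a btree key from an on-disk record. */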
STATIC void
xfs_allocbt_init_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_startblock = rec->alloc.ar_startblock;
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
}
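
/*
 * The high key of a bnobt record is the last block covered by the extent,
 * i.e. startblock + blockcount - 1.
 */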
STATIC void
xfs_bnobt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	__u32				x;

	x = be32_to_cpu(rec->alloc.ar_startblock);
	x += be32_to_cpu(rec->alloc.ar_blockcount) - 1;
	key->alloc.ar_startblock = cpu_to_be32(x);
	key->alloc.ar_blockcount = 0;
}
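
/* For the cntbt, the high key carries only the extent length. */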
STATIC void
xfs_cntbt_init_high_key_from_rec(
	union xfs_btree_key		*key,
	const union xfs_btree_rec	*rec)
{
	key->alloc.ar_blockcount = rec->alloc.ar_blockcount;
	key->alloc.ar_startblock = 0;
}
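
/* Write the record stashed in the cursor out in on-disk format. */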
STATIC void
xfs_allocbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	rec->alloc.ar_startblock = cpu_to_be32(cur->bc_rec.a.ar_startblock);
	rec->alloc.ar_blockcount = cpu_to_be32(cur->bc_rec.a.ar_blockcount);
}
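
/* Point the cursor at the btree root recorded in the AGF. */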
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = cur->bc_ag.agbp->b_addr;

	ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
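
/*
 * Difference between the search key and the cursor's in-core record.  The
 * bnobt orders records by physical start block only.
 */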
STATIC int64_t
xfs_bnobt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
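
/*
 * The cntbt compares extent lengths first and breaks ties with the start
 * block, so equal-sized extents sort in ascending block order.
 */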
STATIC int64_t
xfs_cntbt_key_diff(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*key)
{
	struct xfs_alloc_rec_incore	*rec = &cur->bc_rec.a;
	const struct xfs_alloc_rec	*kp = &key->alloc;
	int64_t				diff;

	diff = (int64_t)be32_to_cpu(kp->ar_blockcount) - rec->ar_blockcount;
	if (diff)
		return diff;

	return (int64_t)be32_to_cpu(kp->ar_startblock) - rec->ar_startblock;
}
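
/* Compare two bnobt keys by start block. */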
STATIC int64_t
xfs_bnobt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
			be32_to_cpu(k2->alloc.ar_startblock);
}
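
/* Compare two cntbt keys: extent length first, start block as tiebreak. */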
STATIC int64_t
xfs_cntbt_diff_two_keys(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	int64_t				diff;

	diff = be32_to_cpu(k1->alloc.ar_blockcount) -
	       be32_to_cpu(k2->alloc.ar_blockcount);
	if (diff)
		return diff;

	return be32_to_cpu(k1->alloc.ar_startblock) -
	       be32_to_cpu(k2->alloc.ar_startblock);
}
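
/* Sanity-check the structure of an allocation btree block. */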
static xfs_failaddr_t
xfs_allocbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_perag	*pag = bp->b_pag;
	xfs_failaddr_t		fa;
	unsigned int		level;
	xfs_btnum_t		btnum = XFS_BTNUM_BNOi;

	if (!xfs_verify_magic(bp, block->bb_magic))
		return __this_address;

	if (xfs_has_crc(mp)) {
		fa = xfs_btree_sblock_v5hdr_verify(bp);
		if (fa)
			return fa;
	}

	/*
	 * The perag may not be attached during grow operations or fully
	 * initialized from the AGF during log recovery. Therefore we can only
	 * check against maximum tree depth from those contexts.
	 *
	 * Otherwise check against the per-tree limit. Peek at one of the
	 * verifier magic values to determine the type of tree we're verifying
	 * against.
	 */
	level = be16_to_cpu(block->bb_level);
	if (bp->b_ops->magic[0] == cpu_to_be32(XFS_ABTC_MAGIC))
		btnum = XFS_BTNUM_CNTi;
	if (pag && pag->pagf_init) {
		if (level >= pag->pagf_levels[btnum])
			return __this_address;
	} else if (level >= mp->m_alloc_maxlevels)
		return __this_address;

	return xfs_btree_sblock_verify(bp, mp->m_alloc_mxr[level != 0]);
}
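
/* Read verifier: check the CRC first, then the block structure. */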
static void
xfs_allocbt_read_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	if (!xfs_btree_sblock_verify_crc(bp))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_allocbt_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}

	if (bp->b_error)
		trace_xfs_btree_corrupt(bp, _RET_IP_);
}
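
/* Write verifier: check the structure, then compute the CRC. */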
static void
xfs_allocbt_write_verify(
	struct xfs_buf	*bp)
{
	xfs_failaddr_t	fa;

	fa = xfs_allocbt_verify(bp);
	if (fa) {
		trace_xfs_btree_corrupt(bp, _RET_IP_);
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}
	xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_bnobt_buf_ops = {
	.name			= "xfs_bnobt",
	.magic			= { cpu_to_be32(XFS_ABTB_MAGIC),
				    cpu_to_be32(XFS_ABTB_CRC_MAGIC) },
	.verify_read		= xfs_allocbt_read_verify,
	.verify_write		= xfs_allocbt_write_verify,
	.verify_struct		= xfs_allocbt_verify,
};

const struct xfs_buf_ops xfs_cntbt_buf_ops = {
	.name			= "xfs_cntbt",
	.magic			= { cpu_to_be32(XFS_ABTC_MAGIC),
				    cpu_to_be32(XFS_ABTC_CRC_MAGIC) },
	.verify_read		= xfs_allocbt_read_verify,
	.verify_write		= xfs_allocbt_write_verify,
	.verify_struct		= xfs_allocbt_verify,
};

STATIC int
xfs_bnobt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_startblock) <
	       be32_to_cpu(k2->alloc.ar_startblock);
}

STATIC int
xfs_bnobt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_startblock) +
		be32_to_cpu(r1->alloc.ar_blockcount) <=
		be32_to_cpu(r2->alloc.ar_startblock);
}

STATIC int
xfs_cntbt_keys_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_key	*k1,
	const union xfs_btree_key	*k2)
{
	return be32_to_cpu(k1->alloc.ar_blockcount) <
		be32_to_cpu(k2->alloc.ar_blockcount) ||
		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
		 be32_to_cpu(k1->alloc.ar_startblock) <
		 be32_to_cpu(k2->alloc.ar_startblock));
}

STATIC int
xfs_cntbt_recs_inorder(
	struct xfs_btree_cur		*cur,
	const union xfs_btree_rec	*r1,
	const union xfs_btree_rec	*r2)
{
	return be32_to_cpu(r1->alloc.ar_blockcount) <
		be32_to_cpu(r2->alloc.ar_blockcount) ||
		(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
		 be32_to_cpu(r1->alloc.ar_startblock) <
		 be32_to_cpu(r2->alloc.ar_startblock));
}
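
/* Operations for the by-block-number (bno) free space btree. */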
static const struct xfs_btree_ops xfs_bnobt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bnobt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_bnobt_key_diff,
	.buf_ops		= &xfs_bnobt_buf_ops,
	.diff_two_keys		= xfs_bnobt_diff_two_keys,
	.keys_inorder		= xfs_bnobt_keys_inorder,
	.recs_inorder		= xfs_bnobt_recs_inorder,
};
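
/* Operations for the by-size (cnt) free space btree. */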
static const struct xfs_btree_ops xfs_cntbt_ops = {
	.rec_len		= sizeof(xfs_alloc_rec_t),
	.key_len		= sizeof(xfs_alloc_key_t),

	.dup_cursor		= xfs_allocbt_dup_cursor,
	.set_root		= xfs_allocbt_set_root,
	.alloc_block		= xfs_allocbt_alloc_block,
	.free_block		= xfs_allocbt_free_block,
	.update_lastrec		= xfs_allocbt_update_lastrec,
	.get_minrecs		= xfs_allocbt_get_minrecs,
	.get_maxrecs		= xfs_allocbt_get_maxrecs,
	.init_key_from_rec	= xfs_allocbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_cntbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_allocbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
	.key_diff		= xfs_cntbt_key_diff,
	.buf_ops		= &xfs_cntbt_buf_ops,
	.diff_two_keys		= xfs_cntbt_diff_two_keys,
	.keys_inorder		= xfs_cntbt_keys_inorder,
	.recs_inorder		= xfs_cntbt_recs_inorder,
};

/* Allocate most of a new allocation btree cursor. */
STATIC struct xfs_btree_cur *
xfs_allocbt_init_common(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = xfs_btree_alloc_cursor(mp, tp, btnum, mp->m_alloc_maxlevels,
			xfs_allocbt_cur_cache);
	cur->bc_ag.abt.active = false;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_ops = &xfs_cntbt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtc_2);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_ops = &xfs_bnobt_ops;
		cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_abtb_2);
	}

	/* take a reference for the cursor */
	atomic_inc(&pag->pag_ref);
	cur->bc_ag.pag = pag;

	if (xfs_has_crc(mp))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	return cur;
}

/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,	/* file system mount point */
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_buf		*agbp,	/* buffer for agf structure */
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)	/* btree identifier */
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, tp, pag, btnum);
	if (btnum == XFS_BTNUM_CNT)
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	else
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);

	cur->bc_ag.agbp = agbp;

	return cur;
}

/* Create a free space btree cursor with a fake root for staging. */
struct xfs_btree_cur *
xfs_allocbt_stage_cursor(
	struct xfs_mount	*mp,
	struct xbtree_afakeroot	*afake,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	struct xfs_btree_cur	*cur;

	cur = xfs_allocbt_init_common(mp, NULL, pag, btnum);
	xfs_btree_stage_afakeroot(cur, afake);
	return cur;
}

/*
 * Install a new free space btree root.  Caller is responsible for invalidating
 * and freeing the old btree blocks.
 */
void
xfs_allocbt_commit_staged_btree(
	struct xfs_btree_cur	*cur,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp)
{
	struct xfs_agf		*agf = agbp->b_addr;
	struct xbtree_afakeroot	*afake = cur->bc_ag.afake;

	ASSERT(cur->bc_flags & XFS_BTREE_STAGING);

	agf->agf_roots[cur->bc_btnum] = cpu_to_be32(afake->af_root);
	agf->agf_levels[cur->bc_btnum] = cpu_to_be32(afake->af_levels);
	xfs_alloc_log_agf(tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);

	if (cur->bc_btnum == XFS_BTNUM_BNO) {
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_bnobt_ops);
	} else {
		cur->bc_flags |= XFS_BTREE_LASTREC_UPDATE;
		xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_cntbt_ops);
	}
}

/* Calculate number of records in an alloc btree block. */
static inline unsigned int
xfs_allocbt_block_maxrecs(
	unsigned int		blocklen,
	bool			leaf)
{
	if (leaf)
		return blocklen / sizeof(xfs_alloc_rec_t);
	return blocklen / (sizeof(xfs_alloc_key_t) + sizeof(xfs_alloc_ptr_t));
}

/*
 * Calculate number of records in an alloc btree block.
 */
int
xfs_allocbt_maxrecs(
	struct xfs_mount	*mp,
	int			blocklen,
	int			leaf)
{
	blocklen -= XFS_ALLOC_BLOCK_LEN(mp);
	return xfs_allocbt_block_maxrecs(blocklen, leaf);
}

/* Free space btrees are at their largest when every other block is free. */
#define XFS_MAX_FREESP_RECORDS	((XFS_MAX_AG_BLOCKS + 1) / 2)

/* Compute the max possible height for free space btrees. */
unsigned int
xfs_allocbt_maxlevels_ondisk(void)
{
	unsigned int		minrecs[2];
	unsigned int		blocklen;

	blocklen = min(XFS_MIN_BLOCKSIZE - XFS_BTREE_SBLOCK_LEN,
		       XFS_MIN_CRC_BLOCKSIZE - XFS_BTREE_SBLOCK_CRC_LEN);

	minrecs[0] = xfs_allocbt_block_maxrecs(blocklen, true) / 2;
	minrecs[1] = xfs_allocbt_block_maxrecs(blocklen, false) / 2;

	return xfs_btree_compute_maxlevels(minrecs, XFS_MAX_FREESP_RECORDS);
}

/* Calculate the freespace btree size for some records. */
xfs_extlen_t
xfs_allocbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_alloc_mnr, len);
}
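
/*
 * The cursor cache is shared by both free space btrees and is sized for the
 * deepest possible on-disk tree.
 */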
int __init
xfs_allocbt_init_cur_cache(void)
{
	xfs_allocbt_cur_cache = kmem_cache_create("xfs_bnobt_cur",
			xfs_btree_cur_sizeof(xfs_allocbt_maxlevels_ondisk()),
			0, 0, NULL);

	if (!xfs_allocbt_cur_cache)
		return -ENOMEM;
	return 0;
}

void
xfs_allocbt_destroy_cur_cache(void)
{
	kmem_cache_destroy(xfs_allocbt_cur_cache);
	xfs_allocbt_cur_cache = NULL;
}