2018-06-06 05:42:14 +03:00
// SPDX-License-Identifier: GPL-2.0
2013-06-27 10:04:53 +04:00
/*
* Copyright ( c ) 2008 - 2010 , 2013 Dave Chinner
* All Rights Reserved .
*/
# include "xfs.h"
# include "xfs_fs.h"
2013-10-23 03:36:05 +04:00
# include "xfs_shared.h"
2020-05-02 02:00:48 +03:00
# include "xfs_format.h"
2013-10-23 03:50:10 +04:00
# include "xfs_log_format.h"
2020-05-02 02:00:48 +03:00
# include "xfs_trans_resv.h"
# include "xfs_mount.h"
# include "xfs_inode.h"
2013-10-23 03:50:10 +04:00
# include "xfs_trans.h"
2013-06-27 10:04:53 +04:00
# include "xfs_trans_priv.h"
# include "xfs_icreate_item.h"
2013-12-13 04:00:43 +04:00
# include "xfs_log.h"
2020-05-02 02:00:45 +03:00
# include "xfs_log_priv.h"
# include "xfs_log_recover.h"
2020-05-02 02:00:48 +03:00
# include "xfs_ialloc.h"
# include "xfs_trace.h"
2013-06-27 10:04:53 +04:00
2021-09-28 00:26:19 +03:00
/* Slab cache backing struct xfs_icreate_item allocations. */
struct kmem_cache	*xfs_icreate_zone;
2013-06-27 10:04:53 +04:00
/* Convert a generic log item to its containing inode create item. */
static inline struct xfs_icreate_item *ICR_ITEM(struct xfs_log_item *lip)
{
	struct xfs_icreate_item	*icp;

	icp = container_of(lip, struct xfs_icreate_item, ic_item);
	return icp;
}
/*
* This returns the number of iovecs needed to log the given inode item .
*
* We only need one iovec for the icreate log structure .
*/
2013-08-12 14:50:04 +04:00
STATIC void
2013-06-27 10:04:53 +04:00
xfs_icreate_item_size (
2013-08-12 14:50:04 +04:00
struct xfs_log_item * lip ,
int * nvecs ,
int * nbytes )
2013-06-27 10:04:53 +04:00
{
2013-08-12 14:50:04 +04:00
* nvecs + = 1 ;
* nbytes + = sizeof ( struct xfs_icreate_log ) ;
2013-06-27 10:04:53 +04:00
}
/*
* This is called to fill in the vector of log iovecs for the
* given inode create log item .
*/
STATIC void
xfs_icreate_item_format (
struct xfs_log_item * lip ,
2013-12-13 04:34:02 +04:00
struct xfs_log_vec * lv )
2013-06-27 10:04:53 +04:00
{
struct xfs_icreate_item * icp = ICR_ITEM ( lip ) ;
2013-12-13 04:34:02 +04:00
struct xfs_log_iovec * vecp = NULL ;
2013-06-27 10:04:53 +04:00
2013-12-13 04:34:02 +04:00
xlog_copy_iovec ( lv , & vecp , XLOG_REG_TYPE_ICREATE ,
2013-12-13 04:00:43 +04:00
& icp - > ic_format ,
sizeof ( struct xfs_icreate_log ) ) ;
2013-06-27 10:04:53 +04:00
}
STATIC void
2019-06-29 05:27:32 +03:00
xfs_icreate_item_release (
2013-06-27 10:04:53 +04:00
struct xfs_log_item * lip )
{
2019-11-14 23:43:04 +03:00
kmem_cache_free ( xfs_icreate_zone , ICR_ITEM ( lip ) ) ;
2013-06-27 10:04:53 +04:00
}
2016-11-28 06:57:42 +03:00
static const struct xfs_item_ops xfs_icreate_item_ops = {
2019-06-29 05:27:32 +03:00
. flags = XFS_ITEM_RELEASE_WHEN_COMMITTED ,
2013-06-27 10:04:53 +04:00
. iop_size = xfs_icreate_item_size ,
. iop_format = xfs_icreate_item_format ,
2019-06-29 05:27:32 +03:00
. iop_release = xfs_icreate_item_release ,
2013-06-27 10:04:53 +04:00
} ;
/*
 * Log the allocation of a new inode chunk.
 *
 * Inode extents can only reside within a single AG, so the chunk is
 * described by its AG number, the block offset within that AG, and the
 * length of the allocated extent.
 *
 * The item is joined to the transaction and marked dirty here, so the
 * caller needs no separate call and no knowledge of icreate items.
 *
 * @tp:		transaction to log the allocation under
 * @agno:	AG containing the new inode chunk
 * @agbno:	first block of the chunk, as an offset within the AG
 * @count:	number of inodes being initialised
 * @inode_size:	on-disk inode size in bytes
 * @length:	length of the allocated extent in filesystem blocks
 * @generation:	generation number to stamp into the new inodes
 */
void
xfs_icreate_log(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	unsigned int		count,
	unsigned int		inode_size,
	xfs_agblock_t		length,
	unsigned int		generation)
{
	struct xfs_icreate_item	*icp;
	struct xfs_icreate_log	*icl;

	icp = kmem_cache_zalloc(xfs_icreate_zone, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE,
			  &xfs_icreate_item_ops);

	icl = &icp->ic_format;
	icl->icl_type = XFS_LI_ICREATE;
	icl->icl_size = 1;		/* single vector */
	icl->icl_ag = cpu_to_be32(agno);
	icl->icl_agbno = cpu_to_be32(agbno);
	icl->icl_count = cpu_to_be32(count);
	icl->icl_isize = cpu_to_be32(inode_size);
	icl->icl_length = cpu_to_be32(length);
	icl->icl_gen = cpu_to_be32(generation);

	xfs_trans_add_item(tp, &icp->ic_item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &icp->ic_item.li_flags);
}
2020-05-02 02:00:45 +03:00
/*
 * Decide where icreate items sit in the recovery replay order.
 *
 * ICREATE items are the logical equivalent of logging a newly
 * initialised inode buffer, so they must be replayed with the logged
 * buffers, before any subsequent inode items that modify those buffers.
 */
static enum xlog_recover_reorder
xlog_recover_icreate_reorder(
	struct xlog_recover_item	*item)
{
	return XLOG_REORDER_BUFFER_LIST;
}
2020-05-02 02:00:48 +03:00
/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log.  Its purpose is to initialise the inodes
 * being allocated on disk.  This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 *
 * Returns 0 on success (including a deliberately skipped replay) or
 * -EINVAL if the log item fails validation.
 */
STATIC int
xlog_recover_icreate_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_icreate_log	*icl;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	unsigned int		count;
	unsigned int		isize;
	xfs_agblock_t		length;
	int			bb_per_cluster;
	int			cancel_count;
	int			nbufs;
	int			i;

	/*
	 * The log record is untrusted input: validate the region length
	 * before dereferencing the payload so a corrupt or truncated record
	 * cannot cause an out-of-bounds read.
	 */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_icreate_log)) {
		xfs_warn(log->l_mp,
			 "xlog_recover_do_icreate_trans: bad item length");
		return -EINVAL;
	}

	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return -EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp,
			"xlog_recover_do_icreate_trans: bad icl size");
		return -EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return -EINVAL;
	}
	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return -EINVAL;
	}
	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return -EINVAL;
	}
	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return -EINVAL;
	}
	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return -EINVAL;
	}

	/*
	 * The inode chunk is either full or sparse and we only support
	 * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
	 */
	if (length != igeo->ialloc_blks &&
	    length != igeo->ialloc_min_blks) {
		xfs_warn(log->l_mp,
			 "%s: unsupported chunk length", __func__);
		return -EINVAL;
	}

	/* verify inode count is consistent with extent length */
	if ((count >> mp->m_sb.sb_inopblog) != length) {
		xfs_warn(log->l_mp,
			 "%s: inconsistent inode count and chunk length",
			 __func__);
		return -EINVAL;
	}

	/*
	 * The icreate transaction can cover multiple cluster buffers and these
	 * buffers could have been freed and reused. Check the individual
	 * buffers for cancellation so we don't overwrite anything written after
	 * a cancellation.
	 */
	bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
	nbufs = length / igeo->blocks_per_cluster;
	for (i = 0, cancel_count = 0; i < nbufs; i++) {
		xfs_daddr_t	daddr;

		daddr = XFS_AGB_TO_DADDR(mp, agno,
				agbno + i * igeo->blocks_per_cluster);
		if (xlog_is_buffer_cancelled(log, daddr, bb_per_cluster))
			cancel_count++;
	}

	/*
	 * We currently only use icreate for a single allocation at a time. This
	 * means we should expect either all or none of the buffers to be
	 * cancelled. Be conservative and skip replay if at least one buffer is
	 * cancelled, but warn the user that something is awry if the buffers
	 * are not consistent.
	 *
	 * XXX: This must be refined to only skip cancelled clusters once we use
	 * icreate for multiple chunk allocations.
	 */
	ASSERT(!cancel_count || cancel_count == nbufs);
	if (cancel_count) {
		if (cancel_count != nbufs)
			xfs_warn(mp,
	"WARNING: partial inode chunk cancellation, skipped icreate.");
		trace_xfs_log_recover_icreate_cancel(log, icl);
		return 0;
	}

	trace_xfs_log_recover_icreate_recover(log, icl);
	return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
				     length, be32_to_cpu(icl->icl_gen));
}
2020-05-02 02:00:45 +03:00
const struct xlog_recover_item_ops xlog_icreate_item_ops = {
. item_type = XFS_LI_ICREATE ,
. reorder = xlog_recover_icreate_reorder ,
2020-05-02 02:00:48 +03:00
. commit_pass2 = xlog_recover_icreate_commit_pass2 ,
2020-05-02 02:00:45 +03:00
} ;