// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_MOUNT_H__
#define __XFS_MOUNT_H__

/* Forward declarations; full definitions live in their own headers. */
struct xlog;
struct xfs_inode;
struct xfs_mru_cache;
struct xfs_ail;
struct xfs_quotainfo;
struct xfs_da_geometry;
struct xfs_perag;
/* dynamic preallocation free space thresholds, 5% down to 1% */
enum {
	XFS_LOWSP_1_PCNT = 0,
	XFS_LOWSP_2_PCNT,
	XFS_LOWSP_3_PCNT,
	XFS_LOWSP_4_PCNT,
	XFS_LOWSP_5_PCNT,
	XFS_LOWSP_MAX,		/* count; sizes m_low_space/m_low_rtexts */
};
/*
 * Error Configuration
 *
 * Error classes define the subsystem the configuration belongs to.
 * Error numbers define the errors that are configurable.
 */
enum {
	XFS_ERR_METADATA,	/* metadata error class */
	XFS_ERR_CLASS_MAX,	/* count; first array dim of m_error_cfg */
};
enum {
	XFS_ERR_DEFAULT,	/* config used when no errno-specific one set */
	XFS_ERR_EIO,
	XFS_ERR_ENOSPC,
	XFS_ERR_ENODEV,
	XFS_ERR_ERRNO_MAX,	/* count; second array dim of m_error_cfg */
};
#define XFS_ERR_RETRY_FOREVER	-1

/*
 * Although retry_timeout is in jiffies which is normally an unsigned long,
 * we limit the retry timeout to 86400 seconds, or one day.  So even a
 * signed 32-bit long is sufficient for a HZ value up to 24855.  Making it
 * signed lets us store the special "-1" value, meaning retry forever.
 */
struct xfs_error_cfg {
	struct xfs_kobj	kobj;		/* sysfs object exposing this config */
	int		max_retries;	/* XFS_ERR_RETRY_FOREVER = no limit */
	long		retry_timeout;	/* in jiffies, -1 = infinite */
};
/*
 * Per-cpu deferred inode inactivation GC lists.
 */
struct xfs_inodegc {
	struct xfs_mount	*mp;	/* back pointer to owning mount */
	struct llist_head	list;	/* lockless list of queued inodes */
	struct delayed_work	work;	/* worker that drains the list */
	/* error recorded from inactivation work; NOTE(review): first-vs-last
	 * error semantics are not visible here -- confirm in xfs_icache.c */
	int			error;

	/* approximate count of inodes in the list */
	unsigned int		items;
	/* presumably bumped when the inodegc shrinker scans this list --
	 * verify against m_inodegc_shrinker users */
	unsigned int		shrinker_hits;
	unsigned int		cpu;	/* cpu this per-cpu struct belongs to */
};
/*
 * The struct xfsmount layout is optimised to separate read-mostly variables
 * from variables that are frequently modified. We put the read-mostly variables
 * first, then place all the other variables at the end.
 *
 * Typically, read-mostly variables are those that are set at mount time and
 * never changed again, or only change rarely as a result of things like sysfs
 * knobs being tweaked.
 */
typedef struct xfs_mount {
	struct xfs_sb		m_sb;		/* copy of fs superblock */
	struct super_block	*m_super;	/* VFS superblock */
	struct xfs_ail		*m_ail;		/* fs active log item list */
	struct xfs_buf		*m_sb_bp;	/* buffer for superblock */
	char			*m_rtname;	/* realtime device name */
	char			*m_logname;	/* external log device name */
	struct xfs_da_geometry	*m_dir_geo;	/* directory block geometry */
	struct xfs_da_geometry	*m_attr_geo;	/* attribute block geometry */
	struct xlog		*m_log;		/* log specific stuff */
	struct xfs_inode	*m_rbmip;	/* pointer to bitmap inode */
	struct xfs_inode	*m_rsumip;	/* pointer to summary inode */
	struct xfs_inode	*m_rootip;	/* pointer to root directory */
	struct xfs_quotainfo	*m_quotainfo;	/* disk quota information */
	struct xfs_buftarg	*m_ddev_targp;	/* data device */
	struct xfs_buftarg	*m_logdev_targp;/* log device */
	struct xfs_buftarg	*m_rtdev_targp;	/* rt device */
	void __percpu		*m_inodegc;	/* percpu inodegc structures */

	/*
	 * Optional cache of rt summary level per bitmap block with the
	 * invariant that m_rsum_cache[bbno] > the maximum i for which
	 * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
	 * Reads and writes are serialized by the rsumip inode lock.
	 */
	uint8_t			*m_rsum_cache;
	struct xfs_mru_cache	*m_filestream;	/* per-mount filestream data */
	struct workqueue_struct	*m_buf_workqueue;
	struct workqueue_struct	*m_unwritten_workqueue;
	struct workqueue_struct	*m_reclaim_workqueue;
	struct workqueue_struct	*m_sync_workqueue;
	struct workqueue_struct	*m_blockgc_wq;
	struct workqueue_struct	*m_inodegc_wq;

	int			m_bsize;	/* fs logical block size */
	uint8_t			m_blkbit_log;	/* blocklog + NBBY */
	uint8_t			m_blkbb_log;	/* blocklog - BBSHIFT */
	uint8_t			m_agno_log;	/* log #ag's */
	uint8_t			m_sectbb_log;	/* sectlog - BBSHIFT */
	int8_t			m_rtxblklog;	/* log2 of rextsize, if possible */
	uint			m_blockmask;	/* sb_blocksize-1 */
	uint			m_blockwsize;	/* sb_blocksize in words */
	uint			m_blockwmask;	/* blockwsize-1 */
	uint			m_alloc_mxr[2];	/* max alloc btree records */
	uint			m_alloc_mnr[2];	/* min alloc btree records */
	uint			m_bmap_dmxr[2];	/* max bmap btree records */
	uint			m_bmap_dmnr[2];	/* min bmap btree records */
	uint			m_rmap_mxr[2];	/* max rmap btree records */
	uint			m_rmap_mnr[2];	/* min rmap btree records */
	uint			m_refc_mxr[2];	/* max refc btree records */
	uint			m_refc_mnr[2];	/* min refc btree records */
	uint			m_alloc_maxlevels; /* max alloc btree levels */
	uint			m_bm_maxlevels[2]; /* max bmap btree levels */
	uint			m_rmap_maxlevels; /* max rmap btree levels */
	uint			m_refc_maxlevels; /* max refcount btree level */
	unsigned int		m_agbtree_maxlevels; /* max level of all AG btrees */
	xfs_extlen_t		m_ag_prealloc_blocks; /* reserved ag blocks */
	uint			m_alloc_set_aside; /* space we can't use */
	uint			m_ag_max_usable; /* max space per AG */
	int			m_dalign;	/* stripe unit */
	int			m_swidth;	/* stripe width */
	xfs_agnumber_t		m_maxagi;	/* highest inode alloc group */
	uint			m_allocsize_log;/* min write size log bytes */
	uint			m_allocsize_blocks; /* min write size blocks */
	int			m_logbufs;	/* number of log buffers */
	int			m_logbsize;	/* size of each log buffer */
	uint			m_rsumlevels;	/* rt summary levels */
	uint			m_rsumsize;	/* size of rt summary, bytes */
	int			m_fixedfsid[2];	/* unchanged for life of FS */
	uint			m_qflags;	/* quota status flags */
	uint64_t		m_features;	/* active filesystem features */
	/* low free space thresholds, indexed by XFS_LOWSP_* */
	uint64_t		m_low_space[XFS_LOWSP_MAX];
	uint64_t		m_low_rtexts[XFS_LOWSP_MAX];
	uint64_t		m_rtxblkmask;	/* rt extent block mask */
	struct xfs_ino_geometry	m_ino_geo;	/* inode geometry */
	struct xfs_trans_resv	m_resv;		/* precomputed res values */
	unsigned long		m_opstate;	/* dynamic state flags */
	bool			m_always_cow;
	bool			m_fail_unmount;
	bool			m_finobt_nores; /* no per-AG finobt resv. */
	bool			m_update_sb;	/* sb needs update in mount */

	/*
	 * Bitsets of per-fs metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access these two fields.
	 */
	uint8_t			m_fs_checked;
	uint8_t			m_fs_sick;
	/*
	 * Bitsets of rt metadata that have been checked and/or are sick.
	 * Callers must hold m_sb_lock to access this field.
	 */
	uint8_t			m_rt_checked;
	uint8_t			m_rt_sick;

	/*
	 * End of read-mostly variables. Frequently written variables and locks
	 * should be placed below this comment from now on. The first variable
	 * here is marked as cacheline aligned so that it is separated from
	 * the read-mostly variables.
	 */
	spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
	struct percpu_counter	m_icount;	/* allocated inodes counter */
	struct percpu_counter	m_ifree;	/* free inodes counter */
	struct percpu_counter	m_fdblocks;	/* free block counter */
	struct percpu_counter	m_frextents;	/* free rt extent counter */

	/*
	 * Count of data device blocks reserved for delayed allocations,
	 * including indlen blocks.  Does not include allocated CoW staging
	 * extents or anything related to the rt device.
	 */
	struct percpu_counter	m_delalloc_blks;

	/*
	 * Global count of allocation btree blocks in use across all AGs. Only
	 * used when perag reservation is enabled. Helps prevent block
	 * reservation from attempting to reserve allocation btree blocks.
	 */
	atomic64_t		m_allocbt_blks;

	struct radix_tree_root	m_perag_tree;	/* per-ag accounting info */
	spinlock_t		m_perag_lock;	/* lock for m_perag_tree */
	uint64_t		m_resblks;	/* total reserved blocks */
	uint64_t		m_resblks_avail;/* available reserved blocks */
	uint64_t		m_resblks_save;	/* reserved blks @ remount,ro */
	struct delayed_work	m_reclaim_work;	/* background inode reclaim */
	struct dentry		*m_debugfs;	/* debugfs parent */
	struct xfs_kobj		m_kobj;		/* sysfs object for the mount */
	struct xfs_kobj		m_error_kobj;	/* sysfs "error" directory */
	struct xfs_kobj		m_error_meta_kobj; /* sysfs error/metadata dir */
	struct xfs_error_cfg	m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
	struct xstats		m_stats;	/* per-fs stats */
#ifdef CONFIG_XFS_ONLINE_SCRUB_STATS
	struct xchk_stats	*m_scrub_stats;	/* online fsck statistics */
#endif
	xfs_agnumber_t		m_agfrotor;	/* last ag where space found */
	atomic_t		m_agirotor;	/* last ag dir inode alloced */

	/* Memory shrinker to throttle and reprioritize inodegc */
	struct shrinker		*m_inodegc_shrinker;
	/*
	 * Workqueue item so that we can coalesce multiple inode flush attempts
	 * into a single flush.
	 */
	struct work_struct	m_flush_inodes_work;

	/*
	 * Generation of the filesystem layout.  This is incremented by each
	 * growfs, and used by the pNFS server to ensure the client updates
	 * its view of the block device once it gets a layout that might
	 * reference the newly added blocks.  Does not need to be persistent
	 * as long as we only allow file system size increments, but if we
	 * ever support shrinks it would have to be persisted in addition
	 * to various other kinds of pain inflicted on the pNFS server.
	 */
	uint32_t		m_generation;
	struct mutex		m_growlock;	/* growfs mutex */

#ifdef DEBUG
	/*
	 * Frequency with which errors are injected.  Replaces xfs_etest; the
	 * value stored in here is the inverse of the frequency with which the
	 * error triggers.  1 = always, 2 = half the time, etc.
	 */
	unsigned int		*m_errortag;
	struct xfs_kobj		m_errortag_kobj; /* sysfs dir for errortags */
#endif

	/* cpus that have inodes queued for inactivation */
	struct cpumask		m_inodegc_cpumask;

	/* Hook to feed dirent updates to an active online repair. */
	struct xfs_hooks	m_dir_update_hooks;
} xfs_mount_t;
/* Shorthand accessor for the mount's inode geometry. */
#define M_IGEO(mp)		(&(mp)->m_ino_geo)

/*
 * Flags for m_features.
 *
 * These are all the active features in the filesystem, regardless of how
 * they are configured.
 */
#define XFS_FEAT_ATTR		(1ULL << 0)	/* xattrs present in fs */
#define XFS_FEAT_NLINK		(1ULL << 1)	/* 32 bit link counts */
#define XFS_FEAT_QUOTA		(1ULL << 2)	/* quota active */
#define XFS_FEAT_ALIGN		(1ULL << 3)	/* inode alignment */
#define XFS_FEAT_DALIGN		(1ULL << 4)	/* data alignment */
#define XFS_FEAT_LOGV2		(1ULL << 5)	/* version 2 logs */
#define XFS_FEAT_SECTOR		(1ULL << 6)	/* sector size > 512 bytes */
#define XFS_FEAT_EXTFLG		(1ULL << 7)	/* unwritten extents */
#define XFS_FEAT_ASCIICI	(1ULL << 8)	/* ASCII only case-insens. */
#define XFS_FEAT_LAZYSBCOUNT	(1ULL << 9)	/* Superblk counters */
#define XFS_FEAT_ATTR2		(1ULL << 10)	/* dynamic attr fork */
#define XFS_FEAT_PARENT		(1ULL << 11)	/* parent pointers */
#define XFS_FEAT_PROJID32	(1ULL << 12)	/* 32 bit project id */
#define XFS_FEAT_CRC		(1ULL << 13)	/* metadata CRCs */
#define XFS_FEAT_V3INODES	(1ULL << 14)	/* Version 3 inodes */
#define XFS_FEAT_PQUOTINO	(1ULL << 15)	/* non-shared proj/grp quotas */
#define XFS_FEAT_FTYPE		(1ULL << 16)	/* inode type in dir */
#define XFS_FEAT_FINOBT		(1ULL << 17)	/* free inode btree */
#define XFS_FEAT_RMAPBT		(1ULL << 18)	/* reverse map btree */
#define XFS_FEAT_REFLINK	(1ULL << 19)	/* reflinked files */
#define XFS_FEAT_SPINODES	(1ULL << 20)	/* sparse inode chunks */
#define XFS_FEAT_META_UUID	(1ULL << 21)	/* metadata UUID */
#define XFS_FEAT_REALTIME	(1ULL << 22)	/* realtime device present */
#define XFS_FEAT_INOBTCNT	(1ULL << 23)	/* inobt block counts */
#define XFS_FEAT_BIGTIME	(1ULL << 24)	/* large timestamps */
#define XFS_FEAT_NEEDSREPAIR	(1ULL << 25)	/* needs xfs_repair */
#define XFS_FEAT_NREXT64	(1ULL << 26)	/* large extent counters */

/* Mount features (bits 48-63 reserved for mount-time options) */
#define XFS_FEAT_NOATTR2	(1ULL << 48)	/* disable attr2 creation */
#define XFS_FEAT_NOALIGN	(1ULL << 49)	/* ignore alignment */
#define XFS_FEAT_ALLOCSIZE	(1ULL << 50)	/* user specified allocation size */
#define XFS_FEAT_LARGE_IOSIZE	(1ULL << 51)	/* report large preferred
						 * I/O size in stat() */
#define XFS_FEAT_WSYNC		(1ULL << 52)	/* synchronous metadata ops */
#define XFS_FEAT_DIRSYNC	(1ULL << 53)	/* synchronous directory ops */
#define XFS_FEAT_DISCARD	(1ULL << 54)	/* discard unused blocks */
#define XFS_FEAT_GRPID		(1ULL << 55)	/* group-ID assigned from directory */
#define XFS_FEAT_SMALL_INUMS	(1ULL << 56)	/* user wants 32bit inodes */
#define XFS_FEAT_IKEEP		(1ULL << 57)	/* keep empty inode clusters*/
#define XFS_FEAT_SWALLOC	(1ULL << 58)	/* stripe width allocation */
#define XFS_FEAT_FILESTREAMS	(1ULL << 59)	/* use filestreams allocator */
#define XFS_FEAT_DAX_ALWAYS	(1ULL << 60)	/* DAX always enabled */
#define XFS_FEAT_DAX_NEVER	(1ULL << 61)	/* DAX never enabled */
#define XFS_FEAT_NORECOVERY	(1ULL << 62)	/* no recovery - dirty fs */
#define XFS_FEAT_NOUUID		(1ULL << 63)	/* ignore uuid during mount */

/* Generate a boolean feature predicate, xfs_has_<name>(mp). */
#define __XFS_HAS_FEAT(name, NAME) \
static inline bool xfs_has_ ## name (struct xfs_mount *mp) \
{ \
	return mp->m_features & XFS_FEAT_ ## NAME; \
}

/* Some features can be added dynamically so they need a set wrapper, too. */
#define __XFS_ADD_FEAT(name, NAME) \
	__XFS_HAS_FEAT(name, NAME); \
static inline void xfs_add_ ## name (struct xfs_mount *mp) \
{ \
	mp->m_features |= XFS_FEAT_ ## NAME; \
	xfs_sb_version_add ## name (&mp->m_sb); \
}
/* Superblock features: instantiate xfs_has_*() (and xfs_add_*() where the
 * feature can be turned on after mount). */
__XFS_ADD_FEAT(attr, ATTR)
__XFS_HAS_FEAT(nlink, NLINK)
__XFS_ADD_FEAT(quota, QUOTA)
__XFS_HAS_FEAT(align, ALIGN)
__XFS_HAS_FEAT(dalign, DALIGN)
__XFS_HAS_FEAT(logv2, LOGV2)
__XFS_HAS_FEAT(sector, SECTOR)
__XFS_HAS_FEAT(extflg, EXTFLG)
__XFS_HAS_FEAT(asciici, ASCIICI)
__XFS_HAS_FEAT(lazysbcount, LAZYSBCOUNT)
__XFS_ADD_FEAT(attr2, ATTR2)
__XFS_HAS_FEAT(parent, PARENT)
__XFS_ADD_FEAT(projid32, PROJID32)
__XFS_HAS_FEAT(crc, CRC)
__XFS_HAS_FEAT(v3inodes, V3INODES)
__XFS_HAS_FEAT(pquotino, PQUOTINO)
__XFS_HAS_FEAT(ftype, FTYPE)
__XFS_HAS_FEAT(finobt, FINOBT)
__XFS_HAS_FEAT(rmapbt, RMAPBT)
__XFS_HAS_FEAT(reflink, REFLINK)
__XFS_HAS_FEAT(sparseinodes, SPINODES)
__XFS_HAS_FEAT(metauuid, META_UUID)
__XFS_HAS_FEAT(realtime, REALTIME)
__XFS_HAS_FEAT(inobtcounts, INOBTCNT)
__XFS_HAS_FEAT(bigtime, BIGTIME)
__XFS_HAS_FEAT(needsrepair, NEEDSREPAIR)
__XFS_HAS_FEAT(large_extent_counts, NREXT64)

/*
 * Mount features
 *
 * These do not change dynamically - features that can come and go, such as 32
 * bit inodes and read-only state, are kept as operational state rather than
 * features.
 */
__XFS_HAS_FEAT(noattr2, NOATTR2)
__XFS_HAS_FEAT(noalign, NOALIGN)
__XFS_HAS_FEAT(allocsize, ALLOCSIZE)
__XFS_HAS_FEAT(large_iosize, LARGE_IOSIZE)
__XFS_HAS_FEAT(wsync, WSYNC)
__XFS_HAS_FEAT(dirsync, DIRSYNC)
__XFS_HAS_FEAT(discard, DISCARD)
__XFS_HAS_FEAT(grpid, GRPID)
__XFS_HAS_FEAT(small_inums, SMALL_INUMS)
__XFS_HAS_FEAT(ikeep, IKEEP)
__XFS_HAS_FEAT(swalloc, SWALLOC)
__XFS_HAS_FEAT(filestreams, FILESTREAMS)
__XFS_HAS_FEAT(dax_always, DAX_ALWAYS)
__XFS_HAS_FEAT(dax_never, DAX_NEVER)
__XFS_HAS_FEAT(norecovery, NORECOVERY)
__XFS_HAS_FEAT(nouuid, NOUUID)
/*
 * Operational mount state flags
 *
 * These are bit numbers into m_opstate, not masks.
 * Use these with atomic bit ops only!
 */
#define XFS_OPSTATE_UNMOUNTING		0	/* filesystem is unmounting */
#define XFS_OPSTATE_CLEAN		1	/* mount was clean */
#define XFS_OPSTATE_SHUTDOWN		2	/* stop all fs operations */
#define XFS_OPSTATE_INODE32		3	/* inode32 allocator active */
#define XFS_OPSTATE_READONLY		4	/* read-only fs */

/*
 * If set, inactivation worker threads will be scheduled to process queued
 * inodegc work.  If not, queued inodes remain in memory waiting to be
 * processed.
 */
#define XFS_OPSTATE_INODEGC_ENABLED	5
/*
 * If set, background speculative prealloc gc worker threads will be scheduled
 * to process queued blockgc work.  If not, inodes retain their preallocations
 * until explicitly deleted.
 */
#define XFS_OPSTATE_BLOCKGC_ENABLED	6

/* Kernel has logged a warning about online fsck being used on this fs. */
#define XFS_OPSTATE_WARNED_SCRUB	7
/* Kernel has logged a warning about shrink being used on this fs. */
#define XFS_OPSTATE_WARNED_SHRINK	8
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP		9
/* Mount time quotacheck is running */
#define XFS_OPSTATE_QUOTACHECK_RUNNING	10
/*
 * Generate atomic accessors for opstate bit NAME:
 *   xfs_is_<name>()    - test the bit
 *   xfs_clear_<name>() - clear the bit, returning its previous value
 *   xfs_set_<name>()   - set the bit, returning its previous value
 */
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
{ \
	return test_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_clear_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_clear_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
} \
static inline bool xfs_set_ ## name (struct xfs_mount *mp) \
{ \
	return test_and_set_bit(XFS_OPSTATE_ ## NAME, &mp->m_opstate); \
}
/* Instantiate the opstate accessors for each flag. */
__XFS_IS_OPSTATE(unmounting, UNMOUNTING)
__XFS_IS_OPSTATE(clean, CLEAN)
__XFS_IS_OPSTATE(shutdown, SHUTDOWN)
__XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)

#ifdef CONFIG_XFS_QUOTA
__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
#else
/* Without quota support, quotacheck never runs. */
#define xfs_is_quotacheck_running(mp)	(false)
#endif
static inline bool
xfs_should_warn ( struct xfs_mount * mp , long nr )
{
return ! test_and_set_bit ( nr , & mp - > m_opstate ) ;
}
2021-08-06 21:05:39 +03:00
/* Flag-to-name table for trace/debug decoding of m_opstate bits. */
#define XFS_OPSTATE_STRINGS \
	{ (1UL << XFS_OPSTATE_UNMOUNTING),		"unmounting" }, \
	{ (1UL << XFS_OPSTATE_CLEAN),			"clean" }, \
	{ (1UL << XFS_OPSTATE_SHUTDOWN),		"shutdown" }, \
	{ (1UL << XFS_OPSTATE_INODE32),			"inode32" }, \
	{ (1UL << XFS_OPSTATE_READONLY),		"read_only" }, \
	{ (1UL << XFS_OPSTATE_INODEGC_ENABLED),		"inodegc" }, \
	{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED),		"blockgc" }, \
	{ (1UL << XFS_OPSTATE_WARNED_SCRUB),		"wscrub" }, \
	{ (1UL << XFS_OPSTATE_WARNED_SHRINK),		"wshrink" }, \
	{ (1UL << XFS_OPSTATE_WARNED_LARP),		"wlarp" }, \
	{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING),	"quotacheck" }
/*
 * Max and min values for mount-option defined I/O
 * preallocation sizes.
 */
#define XFS_MAX_IO_LOG		30	/* 1G */
#define XFS_MIN_IO_LOG		PAGE_SHIFT
/*
 * Shut down the filesystem.  @flags is a mask of SHUTDOWN_* reason codes;
 * @fname/@lnnum record the call site for diagnostics (supplied automatically
 * by the xfs_force_shutdown() wrapper below).
 */
void xfs_do_force_shutdown(struct xfs_mount *mp, uint32_t flags, char *fname,
		int lnnum);
#define xfs_force_shutdown(m,f)	\
	xfs_do_force_shutdown(m, f, __FILE__, __LINE__)

/* Reasons for a forced shutdown. */
#define SHUTDOWN_META_IO_ERROR	(1u << 0) /* write attempt to metadata failed */
#define SHUTDOWN_LOG_IO_ERROR	(1u << 1) /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT	(1u << 2) /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE	(1u << 3) /* corrupt in-memory structures */
#define SHUTDOWN_CORRUPT_ONDISK	(1u << 4) /* corrupt metadata on device */
#define SHUTDOWN_DEVICE_REMOVED	(1u << 5) /* device removed underneath us */

/*
 * Flag-to-name table for trace/debug decoding of shutdown reasons.
 * SHUTDOWN_CORRUPT_ONDISK was missing from this table even though it is a
 * defined reason above, so it decoded as a bare hex value; add it so every
 * reason has a name.
 */
#define XFS_SHUTDOWN_STRINGS \
	{ SHUTDOWN_META_IO_ERROR,	"metadata_io" }, \
	{ SHUTDOWN_LOG_IO_ERROR,	"log_io" }, \
	{ SHUTDOWN_FORCE_UMOUNT,	"force_umount" }, \
	{ SHUTDOWN_CORRUPT_INCORE,	"corruption" }, \
	{ SHUTDOWN_CORRUPT_ONDISK,	"corrupt_ondisk" }, \
	{ SHUTDOWN_DEVICE_REMOVED,	"device_removed" }
/*
 * Flags for xfs_mountfs
 */
#define XFS_MFSI_QUIET		0x40	/* Be silent if mount errors found */
/*
 * Convert a disk address (512-byte basic blocks) to the number of the
 * allocation group containing it.
 */
static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);	/* daddr -> fs block */
	/* do_div() divides ld in place; the quotient left in ld is the AG. */
	do_div(ld, mp->m_sb.sb_agblocks);
	return (xfs_agnumber_t) ld;
}
/*
 * Convert a disk address (512-byte basic blocks) to the block offset
 * within its allocation group.
 */
static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
	xfs_rfsblock_t ld = XFS_BB_TO_FSBT(mp, d);	/* daddr -> fs block */
	/* do_div() returns the remainder: the block offset within the AG. */
	return (xfs_agblock_t) do_div(ld, mp->m_sb.sb_agblocks);
}
extern void	xfs_uuid_table_free(void);
extern uint64_t	xfs_default_resblks(xfs_mount_t *mp);
extern int	xfs_mountfs(xfs_mount_t *mp);
extern void	xfs_unmountfs(xfs_mount_t *);

/*
 * Deltas for the block count can vary from 1 to very large, but lock contention
 * only occurs on frequent small block count updates such as in the delayed
 * allocation path for buffered writes (page a time updates). Hence we set
 * a large batch count (1024) to minimise global counter updates except when
 * we get near to ENOSPC and we have to be very accurate with our updates.
 */
#define XFS_FDBLOCKS_BATCH	1024
/*
 * NOTE(extraction): the text below is the git commit message for the change
 * that introduced xfs_fdblocks_unavailable(); it leaked into this file and
 * is preserved here as a comment.
 *
 * xfs: don't include bnobt blocks when reserving free block pool
 *
 * xfs_reserve_blocks controls the size of the user-visible free space
 * reserve pool.  Given the difference between the current and requested
 * pool sizes, it will try to reserve free space from fdblocks.  However,
 * the amount requested from fdblocks is also constrained by the amount of
 * space that we think xfs_mod_fdblocks will give us.  If we forget to
 * subtract m_allocbt_blks before calling xfs_mod_fdblocks, it will return
 * ENOSPC and we'll hang the kernel at mount due to the infinite loop.
 *
 * In commit fd43cf600cf6, we decided that xfs_mod_fdblocks should not hand
 * out the "free space" used by the free space btrees, because some portion
 * of the free space btrees hold in reserve space for future btree
 * expansion.  Unfortunately, xfs_reserve_blocks' estimation of the number
 * of blocks that it could request from xfs_mod_fdblocks was not updated to
 * include m_allocbt_blks, so if space is extremely low, the caller hangs.
 *
 * Fix this by creating a function to estimate the number of blocks that
 * can be reserved from fdblocks, which needs to exclude the set-aside and
 * m_allocbt_blks.
 *
 * Found by running xfs/306 (which formats a single-AG 20MB filesystem)
 * with an fstests configuration that specifies a 1k blocksize and a
 * specially crafted log size that will consume 7/8 of the space (17920
 * blocks, specifically) in that AG.
 *
 * Cc: Brian Foster <bfoster@redhat.com>
 * Fixes: fd43cf600cf6 ("xfs: set aside allocation btree blocks from block reservation")
 * Signed-off-by: Darrick J. Wong <djwong@kernel.org>
 * Reviewed-by: Brian Foster <bfoster@redhat.com>
 * Reviewed-by: Dave Chinner <dchinner@redhat.com>
 */
/*
* Estimate the amount of free space that is not available to userspace and is
* not explicitly reserved from the incore fdblocks . This includes :
*
* - The minimum number of blocks needed to support splitting a bmap btree
* - The blocks currently in use by the freespace btrees because they record
* the actual blocks that will fill per - AG metadata space reservations
*/
static inline uint64_t
xfs_fdblocks_unavailable (
struct xfs_mount * mp )
{
return mp - > m_alloc_set_aside + atomic64_read ( & mp - > m_allocbt_blks ) ;
}
2022-04-11 23:49:42 +03:00
int xfs_mod_freecounter(struct xfs_mount *mp, struct percpu_counter *counter,
		int64_t delta, bool rsvd);

/*
 * Adjust the free data block counter by @delta; @reserved allows the caller
 * to dip into the reserved block pool.
 */
static inline int
xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta, bool reserved)
{
	return xfs_mod_freecounter(mp, &mp->m_fdblocks, delta, reserved);
}

/* Adjust the free realtime extent counter by @delta (no reserve pool). */
static inline int
xfs_mod_frextents(struct xfs_mount *mp, int64_t delta)
{
	return xfs_mod_freecounter(mp, &mp->m_frextents, delta, false);
}
2006-03-31 07:04:17 +04:00
extern int xfs_readsb ( xfs_mount_t * , int ) ;
2005-04-17 02:20:36 +04:00
extern void xfs_freesb ( xfs_mount_t * ) ;
2014-11-28 06:02:59 +03:00
extern bool xfs_fs_writable ( struct xfs_mount * mp , int level ) ;
2017-06-16 21:00:05 +03:00
extern int xfs_sb_validate_fsb_count ( struct xfs_sb * , uint64_t ) ;
2005-04-17 02:20:36 +04:00
2010-02-17 22:36:13 +03:00
extern int xfs_dev_is_read_only ( struct xfs_mount * , char * ) ;
2011-01-04 03:35:03 +03:00
extern void xfs_set_low_space_thresholds ( struct xfs_mount * ) ;
2015-11-03 04:27:22 +03:00
int xfs_zero_extent ( struct xfs_inode * ip , xfs_fsblock_t start_fsb ,
xfs_off_t count_fsb ) ;
2016-05-18 04:05:33 +03:00
struct xfs_error_cfg * xfs_error_get_cfg ( struct xfs_mount * mp ,
int error_class , int error ) ;
2018-07-20 19:28:40 +03:00
void xfs_force_summary_recalc ( struct xfs_mount * mp ) ;
2021-08-08 18:27:12 +03:00
int xfs_add_incompat_log_feature ( struct xfs_mount * mp , uint32_t feature ) ;
bool xfs_clear_incompat_log_features ( struct xfs_mount * mp ) ;
2019-04-26 04:26:22 +03:00
void xfs_mod_delalloc ( struct xfs_mount * mp , int64_t delta ) ;
2016-05-18 04:05:33 +03:00
2005-04-17 02:20:36 +04:00
# endif /* __XFS_MOUNT_H__ */