// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#ifndef __XFS_DQUOT_H__
#define __XFS_DQUOT_H__

/*
 * Dquots are structures that hold quota information about a user or a group,
 * much like inodes are for files.  In fact, dquots share many characteristics
 * with inodes.  However, dquots can also be a centralized resource, relative
 * to a collection of inodes.  In this respect, dquots share some
 * characteristics of the superblock.
 * XFS dquots exploit both of these properties in their algorithms.  They make
 * every attempt not to be a bottleneck when quotas are on and to have minimal
 * impact, if any, when quotas are off.
 */

struct xfs_mount;
struct xfs_trans;

enum {
        XFS_QLOWSP_1_PCNT = 0,
        XFS_QLOWSP_3_PCNT,
        XFS_QLOWSP_5_PCNT,
        XFS_QLOWSP_MAX
};

struct xfs_dquot_res {
        /* Total resources allocated and reserved. */
        xfs_qcnt_t              reserved;

        /* Total resources allocated. */
        xfs_qcnt_t              count;

        /* Absolute and preferred limits. */
        xfs_qcnt_t              hardlimit;
        xfs_qcnt_t              softlimit;

        /*
         * For root dquots, this is the default grace period, in seconds.
         * Otherwise, this is when the quota grace period expires,
         * in seconds since the Unix epoch.
         */
        time64_t                timer;
};

static inline bool
xfs_dquot_res_over_limits(
        const struct xfs_dquot_res      *qres)
{
        if ((qres->softlimit && qres->softlimit < qres->reserved) ||
            (qres->hardlimit && qres->hardlimit < qres->reserved))
                return true;
        return false;
}
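
/*
 * Example (illustrative only; the values are made up): a reservation above
 * the soft limit but below the hard limit still counts as over the limits
 * here, because exceeding either limit returns true.
 *
 *      struct xfs_dquot_res res = {
 *              .reserved  = 150,
 *              .softlimit = 100,
 *              .hardlimit = 200,
 *      };
 *      bool over = xfs_dquot_res_over_limits(&res);    // true
 */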

/*
 * The incore dquot structure
 */
struct xfs_dquot {
        struct list_head        q_lru;
        struct xfs_mount        *q_mount;
        xfs_dqtype_t            q_type;
        uint16_t                q_flags;
        xfs_dqid_t              q_id;
        uint                    q_nrefs;
        int                     q_bufoffset;
        xfs_daddr_t             q_blkno;
        xfs_fileoff_t           q_fileoffset;

        struct xfs_dquot_res    q_blk;  /* regular blocks */
        struct xfs_dquot_res    q_ino;  /* inodes */
        struct xfs_dquot_res    q_rtb;  /* realtime blocks */

        struct xfs_dq_logitem   q_logitem;

        xfs_qcnt_t              q_prealloc_lo_wmark;
        xfs_qcnt_t              q_prealloc_hi_wmark;
        int64_t                 q_low_space[XFS_QLOWSP_MAX];
        struct mutex            q_qlock;
        struct completion       q_flush;
        atomic_t                q_pincount;
        struct wait_queue_head  q_pinwait;
};
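
/*
 * Illustrative sketch, not part of this header's interface: each of q_blk,
 * q_ino and q_rtb is a struct xfs_dquot_res, so a (hypothetical) helper could
 * check all three resource classes of a dquot against their limits:
 *
 *      static bool example_dquot_over_any_limit(const struct xfs_dquot *dqp)
 *      {
 *              return xfs_dquot_res_over_limits(&dqp->q_blk) ||
 *                     xfs_dquot_res_over_limits(&dqp->q_ino) ||
 *                     xfs_dquot_res_over_limits(&dqp->q_rtb);
 *      }
 */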

/*
 * Lock hierarchy for q_qlock:
 *      XFS_QLOCK_NORMAL is the implicit default,
 *      XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
 */
enum {
        XFS_QLOCK_NORMAL = 0,
        XFS_QLOCK_NESTED,
};
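
/*
 * Minimal sketch of what the lock hierarchy comment above describes.  The
 * real xfs_dqlock2() lives in xfs_dquot.c; this is only an illustration of
 * the ordering rule, not its actual body:
 *
 *      void example_dqlock2(struct xfs_dquot *d1, struct xfs_dquot *d2)
 *      {
 *              if (d1->q_id > d2->q_id)
 *                      swap(d1, d2);
 *              mutex_lock(&d1->q_qlock);
 *              mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
 *      }
 */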

/*
 * Manage the q_flush completion queue embedded in the dquot.  This completion
 * queue synchronizes processes attempting to flush the in-core dquot back to
 * disk.
 */
static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
        wait_for_completion(&dqp->q_flush);
}

static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
        return try_wait_for_completion(&dqp->q_flush);
}

static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
        complete(&dqp->q_flush);
}
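
/*
 * Illustrative sketch only (an assumed usage pattern, not a rule stated by
 * this header): a caller that wins the flush lock is expected to release it
 * again with xfs_dqfunlock() once the flush-related work is done, e.g.
 *
 *      if (xfs_dqflock_nowait(dqp)) {
 *              // ... inspect or flush the dquot ...
 *              xfs_dqfunlock(dqp);
 *      }
 */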

static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
{
        return mutex_trylock(&dqp->q_qlock);
}

static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
        mutex_lock(&dqp->q_qlock);
}

static inline void xfs_dqunlock(struct xfs_dquot *dqp)
{
        mutex_unlock(&dqp->q_qlock);
}

static inline int
xfs_dquot_type(const struct xfs_dquot *dqp)
{
        return dqp->q_type & XFS_DQTYPE_REC_MASK;
}

static inline int xfs_this_quota_on(struct xfs_mount *mp, xfs_dqtype_t type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                return XFS_IS_UQUOTA_ON(mp);
        case XFS_DQTYPE_GROUP:
                return XFS_IS_GQUOTA_ON(mp);
        case XFS_DQTYPE_PROJ:
                return XFS_IS_PQUOTA_ON(mp);
        default:
                return 0;
        }
}

static inline struct xfs_dquot *xfs_inode_dquot(
        struct xfs_inode        *ip,
        xfs_dqtype_t            type)
{
        switch (type) {
        case XFS_DQTYPE_USER:
                return ip->i_udquot;
        case XFS_DQTYPE_GROUP:
                return ip->i_gdquot;
        case XFS_DQTYPE_PROJ:
                return ip->i_pdquot;
        default:
                return NULL;
        }
}
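
/*
 * Usage sketch (illustrative only; it assumes the mount is reachable as
 * ip->i_mount, which is not declared in this header):
 *
 *      xfs_dqtype_t type = XFS_DQTYPE_USER;    // or _GROUP / _PROJ
 *
 *      if (xfs_this_quota_on(ip->i_mount, type)) {
 *              struct xfs_dquot *dqp = xfs_inode_dquot(ip, type);
 *
 *              // dqp may still be NULL if no dquot is attached to the inode
 *      }
 */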

/* Decide if the dquot's limits are actually being enforced. */
static inline bool
xfs_dquot_is_enforced(
        const struct xfs_dquot  *dqp)
{
        switch (xfs_dquot_type(dqp)) {
        case XFS_DQTYPE_USER:
                return XFS_IS_UQUOTA_ENFORCED(dqp->q_mount);
        case XFS_DQTYPE_GROUP:
                return XFS_IS_GQUOTA_ENFORCED(dqp->q_mount);
        case XFS_DQTYPE_PROJ:
                return XFS_IS_PQUOTA_ENFORCED(dqp->q_mount);
        }
        ASSERT(0);
        return false;
}

/*
 * Check whether a dquot is under low free space conditions.  We assume the
 * quota is enabled and enforced.
 */
static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
{
        int64_t freesp;

        freesp = dqp->q_blk.hardlimit - dqp->q_blk.reserved;
        if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT])
                return true;
        return false;
}
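
/*
 * Illustrative sketch only (an assumed caller, not part of this interface):
 * per the comment above, xfs_dquot_lowsp() expects the quota to be enabled
 * and enforced, so a caller deciding whether to throttle might check:
 *
 *      if (dqp && xfs_dquot_is_enforced(dqp) && xfs_dquot_lowsp(dqp)) {
 *              // back off speculative preallocation, for example
 *      }
 */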

void xfs_dquot_to_disk(struct xfs_disk_dquot *ddqp, struct xfs_dquot *dqp);

#define XFS_DQ_IS_LOCKED(dqp)   (mutex_is_locked(&((dqp)->q_qlock)))
#define XFS_DQ_IS_DIRTY(dqp)    ((dqp)->q_flags & XFS_DQFLAG_DIRTY)

void            xfs_qm_dqdestroy(struct xfs_dquot *dqp);
int             xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
void            xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
void            xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
void            xfs_qm_adjust_dqlimits(struct xfs_dquot *d);
xfs_dqid_t      xfs_qm_id_for_quotatype(struct xfs_inode *ip,
                        xfs_dqtype_t type);
int             xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
                        xfs_dqtype_t type, bool can_alloc,
                        struct xfs_dquot **dqpp);
int             xfs_qm_dqget_inode(struct xfs_inode *ip, xfs_dqtype_t type,
                        bool can_alloc, struct xfs_dquot **dqpp);
int             xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
                        xfs_dqtype_t type, struct xfs_dquot **dqpp);
int             xfs_qm_dqget_uncached(struct xfs_mount *mp,
                        xfs_dqid_t id, xfs_dqtype_t type,
                        struct xfs_dquot **dqpp);
void            xfs_qm_dqput(struct xfs_dquot *dqp);

void            xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);

void            xfs_dquot_set_prealloc_limits(struct xfs_dquot *);

static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
        xfs_dqlock(dqp);
        dqp->q_nrefs++;
        xfs_dqunlock(dqp);
        return dqp;
}
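
/*
 * Usage sketch (illustrative, not a requirement stated in this header): a
 * reference taken with xfs_qm_dqhold() is presumably dropped later with
 * xfs_qm_dqput(), e.g.
 *
 *      struct xfs_dquot *ref = xfs_qm_dqhold(dqp);
 *      // ... use ref while the extra reference pins the dquot ...
 *      xfs_qm_dqput(ref);
 */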

time64_t xfs_dquot_set_timeout(struct xfs_mount *mp, time64_t timeout);
time64_t xfs_dquot_set_grace_period(time64_t grace);

void xfs_qm_init_dquot_blk(struct xfs_trans *tp, xfs_dqid_t id,
                xfs_dqtype_t type, struct xfs_buf *bp);

#endif /* __XFS_DQUOT_H__ */