2005-04-17 02:20:36 +04:00
/*
 * Implementation of the diskquota system for the LINUX operating system. QUOTA
 * is implemented using the BSD system call interface as the means of
 * communication with the user level. This file contains the generic routines
 * called by the different filesystems on allocation of an inode or block.
 * These routines take care of the administration needed to have a consistent
 * diskquota tracking system. The ideas of both user and group quotas are based
 * on the Melbourne quota system as used on BSD derived systems. The internal
 * implementation is based on one of the several variants of the LINUX
 * inode-subsystem with added complexity of the diskquota system.
 *
 * Author:	Marco van Wieringen <mvw@planets.elm.net>
 *
 * Fixes:	Dmitry Gorodchanin <pgmdsg@ibi.com>, 11 Feb 96
 *
 *		Revised list management to avoid races
 *		-- Bill Hawes, <whawes@star.net>, 9/98
 *
 *		Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
 *		As the consequence the locking was moved from dquot_decr_...(),
 *		dquot_incr_...() to calling functions.
 *		invalidate_dquots() now writes modified dquots.
 *		Serialized quota_off() and quota_on() for mount point.
 *		Fixed a few bugs in grow_dquots().
 *		Fixed deadlock in write_dquot() - we no longer account quotas on
 *		quota files
 *		remove_dquot_ref() moved to inode.c - it now traverses through inodes
 *		add_dquot_ref() restarts after blocking
 *		Added check for bogus uid and fixed check for group in quotactl.
 *		Jan Kara, <jack@suse.cz>, sponsored by SuSE CR, 10-11/99
 *
 *		Used struct list_head instead of own list struct
 *		Invalidation of referenced dquots is no longer possible
 *		Improved free_dquots list management
 *		Quota and i_blocks are now updated in one place to avoid races
 *		Warnings are now delayed so we won't block in critical section
 *		Write updated not to require dquot lock
 *		Jan Kara, <jack@suse.cz>, 9/2000
 *
 *		Added dynamic quota structure allocation
 *		Jan Kara <jack@suse.cz> 12/2000
 *
 *		Rewritten quota interface. Implemented new quota format and
 *		formats registering.
 *		Jan Kara, <jack@suse.cz>, 2001, 2002
 *
 *		New SMP locking.
 *		Jan Kara, <jack@suse.cz>, 10/2002
 *
 *		Added journalled quota support, fix lock inversion problems
 *		Jan Kara, <jack@suse.cz>, 2003, 2004
 *
 * (C) Copyright 1994 - 1997 Marco van Wieringen
 */
# include <linux/errno.h>
# include <linux/kernel.h>
# include <linux/fs.h>
# include <linux/mount.h>
# include <linux/mm.h>
# include <linux/time.h>
# include <linux/types.h>
# include <linux/string.h>
# include <linux/fcntl.h>
# include <linux/stat.h>
# include <linux/tty.h>
# include <linux/file.h>
# include <linux/slab.h>
# include <linux/sysctl.h>
# include <linux/init.h>
# include <linux/module.h>
# include <linux/proc_fs.h>
# include <linux/security.h>
2012-02-13 07:58:52 +04:00
# include <linux/sched.h>
2017-02-02 19:54:15 +03:00
# include <linux/cred.h>
2005-04-17 02:20:36 +04:00
# include <linux/kmod.h>
# include <linux/namei.h>
2006-01-11 23:17:46 +03:00
# include <linux/capability.h>
2005-11-07 11:59:35 +03:00
# include <linux/quotaops.h>
2011-03-22 14:23:40 +03:00
# include "../internal.h" /* ugh */
2005-04-17 02:20:36 +04:00
2012-05-28 19:40:17 +04:00
# include <linux/uaccess.h>
2005-04-17 02:20:36 +04:00
/*
 * There are three quota SMP locks. dq_list_lock protects all lists with
 * quotas and quota formats.
 * dq_data_lock protects data from dq_dqb and also mem_dqinfo structures and
 * also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
 * i_blocks and i_bytes updates themselves are guarded by i_lock acquired
 * directly in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
 * modifications of quota state (on quotaon and quotaoff) and readers who care
 * about latest values take it as well.
 *
 * The spinlock ordering is hence:
 *   dq_data_lock > dq_list_lock > i_lock,
 *   dq_list_lock > dq_state_lock
 *
 * Note that some things (eg. the sb pointer, type, id) don't change during
 * the life of the dquot structure and so needn't be protected by a lock.
 *
 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
 * Reading such a pointer needs srcu_read_lock(&dquot_srcu), and
 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from an
 * inode and before dropping dquot references, to avoid use of dquots after
 * they are freed. dq_data_lock is used to serialize the pointer setting and
 * clearing operations.
 * Special care needs to be taken about the S_NOQUOTA inode flag (marking that
 * an inode is a quota file). Functions adding pointers from inode to dquots
 * have to check this flag under dq_data_lock and then (if S_NOQUOTA is not
 * set) they have to do all pointer modifications before dropping
 * dq_data_lock. This makes sure they cannot race with quotaon which first
 * sets S_NOQUOTA and then drops all pointers to dquots from an inode.
 *
 * Each dquot has its dq_lock mutex. A dquot is locked when it is being read
 * into memory (or space for it is being allocated) on the first dqget(), when
 * it is being written out, and when it is being released on the last dqput().
 * The allocation and release operations are serialized by the dq_lock and by
 * checking the use count in dquot_release().
 *
 * Lock ordering (including related VFS locks) is the following:
 *   s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
 */
2009-01-26 17:32:46 +03:00
static __cacheline_aligned_in_smp DEFINE_SPINLOCK ( dq_list_lock ) ;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK ( dq_state_lock ) ;
__cacheline_aligned_in_smp DEFINE_SPINLOCK ( dq_data_lock ) ;
2009-01-14 18:19:32 +03:00
EXPORT_SYMBOL ( dq_data_lock ) ;
2014-06-04 08:23:19 +04:00
DEFINE_STATIC_SRCU ( dquot_srcu ) ;
2005-04-17 02:20:36 +04:00
2017-08-02 18:18:50 +03:00
static DECLARE_WAIT_QUEUE_HEAD ( dquot_ref_wq ) ;
2010-07-20 18:54:43 +04:00
/*
 * Rate-limited error reporter for the quota subsystem.
 *
 * @sb:   superblock the error relates to (s_id is printed for the device)
 * @func: name of the reporting function (callers pass __func__ via the
 *        quota_error() wrapper)
 * @fmt:  printf-style format for the message body
 *
 * Uses printk_ratelimit() so a storm of quota errors cannot flood the log;
 * when the limit is hit the message is silently dropped.  The %pV format
 * lets us forward the caller's varargs through struct va_format.
 */
void __quota_error(struct super_block *sb, const char *func,
		   const char *fmt, ...)
{
	if (printk_ratelimit()) {
		va_list args;
		struct va_format vaf;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
		       sb->s_id, func, &vaf);

		va_end(args);
	}
}
EXPORT_SYMBOL(__quota_error);
2010-04-26 14:09:26 +04:00
# if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
2005-04-17 02:20:36 +04:00
static char * quotatypes [ ] = INITQFNAMES ;
2010-04-26 14:09:26 +04:00
# endif
2005-04-17 02:20:36 +04:00
static struct quota_format_type * quota_formats ; /* List of registered formats */
static struct quota_module_name module_names [ ] = INIT_QUOTA_MODULE_NAMES ;
/* SLAB cache for dquot structures */
2006-12-07 07:33:20 +03:00
static struct kmem_cache * dquot_cachep ;
2005-04-17 02:20:36 +04:00
/*
 * Register a quota format (e.g. vfsv0, vfsv1) with the core.
 *
 * The format is pushed onto the head of the global quota_formats list
 * under dq_list_lock.  Always returns 0; the int return type exists for
 * interface symmetry with other registration APIs.
 */
int register_quota_format(struct quota_format_type *fmt)
{
	spin_lock(&dq_list_lock);
	fmt->qf_next = quota_formats;
	quota_formats = fmt;
	spin_unlock(&dq_list_lock);
	return 0;
}
EXPORT_SYMBOL(register_quota_format);
2005-04-17 02:20:36 +04:00
void unregister_quota_format ( struct quota_format_type * fmt )
{
struct quota_format_type * * actqf ;
spin_lock ( & dq_list_lock ) ;
2009-01-27 17:47:22 +03:00
for ( actqf = & quota_formats ; * actqf & & * actqf ! = fmt ;
actqf = & ( * actqf ) - > qf_next )
;
2005-04-17 02:20:36 +04:00
if ( * actqf )
* actqf = ( * actqf ) - > qf_next ;
spin_unlock ( & dq_list_lock ) ;
}
2009-01-14 18:19:32 +03:00
EXPORT_SYMBOL ( unregister_quota_format ) ;
2005-04-17 02:20:36 +04:00
/*
 * Look up a registered quota format by its format id and take a module
 * reference on it.
 *
 * If the format is not registered (or its module is going away), we drop
 * dq_list_lock, try to load the module named in module_names[] for this id
 * via request_module(), and then search again.  Returns the format with a
 * module reference held, or NULL if it cannot be found/loaded.  The caller
 * must drop the reference with put_quota_format().
 */
static struct quota_format_type *find_quota_format(int id)
{
	struct quota_format_type *actqf;

	spin_lock(&dq_list_lock);
	for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
	     actqf = actqf->qf_next)
		;
	if (!actqf || !try_module_get(actqf->qf_owner)) {
		int qm;

		/* request_module() may sleep - can't hold a spinlock */
		spin_unlock(&dq_list_lock);

		for (qm = 0; module_names[qm].qm_fmt_id &&
			     module_names[qm].qm_fmt_id != id; qm++)
			;
		if (!module_names[qm].qm_fmt_id ||
		    request_module(module_names[qm].qm_mod_name))
			return NULL;

		/* Retry the lookup now that the module may be loaded */
		spin_lock(&dq_list_lock);
		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
		     actqf = actqf->qf_next)
			;
		if (actqf && !try_module_get(actqf->qf_owner))
			actqf = NULL;
	}
	spin_unlock(&dq_list_lock);
	return actqf;
}
/* Drop the module reference taken by find_quota_format(). */
static void put_quota_format(struct quota_format_type *fmt)
{
	module_put(fmt->qf_owner);
}
/*
 * Dquot List Management:
 * The quota code uses three lists for dquot management: the inuse_list,
 * free_dquots, and dquot_hash[] array. A single dquot structure may be
 * on all three lists, depending on its current state.
 *
 * All dquots are placed at the end of inuse_list when first created, and
 * this list is used for the invalidate operation, which must look at every
 * dquot.
 *
 * Unused dquots (dq_count == 0) are added to the free_dquots list when
 * freed, and this list is searched whenever we need an available dquot.
 * Dquots are removed from the list as soon as they are used again, and
 * dqstats.free_dquots gives the number of dquots on the list. When a
 * dquot is invalidated it's completely released from memory.
 *
 * Dquots with a specific identity (device, type and id) are placed on
 * one of the dquot_hash[] hash chains. This provides an efficient search
 * mechanism to locate a specific dquot.
 */
static LIST_HEAD ( inuse_list ) ;
static LIST_HEAD ( free_dquots ) ;
static unsigned int dq_hash_bits , dq_hash_mask ;
static struct hlist_head * dquot_hash ;
struct dqstats dqstats ;
2009-01-14 18:19:32 +03:00
EXPORT_SYMBOL ( dqstats ) ;
2005-04-17 02:20:36 +04:00
2010-02-09 20:20:39 +03:00
static qsize_t inode_get_rsv_space ( struct inode * inode ) ;
2015-06-24 19:07:02 +03:00
static int __dquot_initialize ( struct inode * inode , int type ) ;
2010-02-09 20:20:39 +03:00
2005-04-17 02:20:36 +04:00
/*
 * Hash a (superblock, quota id) pair into a dquot_hash[] bucket index.
 *
 * Mixes the superblock pointer with the id and quota type; folding the
 * high bits back in (tmp >> dq_hash_bits) improves bucket distribution.
 */
static inline unsigned int
hashfn(const struct super_block *sb, struct kqid qid)
{
	unsigned int id = from_kqid(&init_user_ns, qid);
	int type = qid.type;
	unsigned long tmp;

	tmp = (((unsigned long)sb >> L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
	return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
}
/*
* Following list functions expect dq_list_lock to be held
*/
/* Link a dquot into its hash chain. Caller must hold dq_list_lock. */
static inline void insert_dquot_hash(struct dquot *dquot)
{
	struct hlist_head *head;

	head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
	hlist_add_head(&dquot->dq_hash, head);
}
/* Unlink a dquot from its hash chain. Caller must hold dq_list_lock. */
static inline void remove_dquot_hash(struct dquot *dquot)
{
	hlist_del_init(&dquot->dq_hash);
}
2009-01-27 03:47:11 +03:00
static struct dquot * find_dquot ( unsigned int hashent , struct super_block * sb ,
2012-09-16 16:45:30 +04:00
struct kqid qid )
2005-04-17 02:20:36 +04:00
{
struct hlist_node * node ;
struct dquot * dquot ;
hlist_for_each ( node , dquot_hash + hashent ) {
dquot = hlist_entry ( node , struct dquot , dq_hash ) ;
2012-09-16 14:56:19 +04:00
if ( dquot - > dq_sb = = sb & & qid_eq ( dquot - > dq_id , qid ) )
2005-04-17 02:20:36 +04:00
return dquot ;
}
2009-01-26 18:01:43 +03:00
return NULL ;
2005-04-17 02:20:36 +04:00
}
/* Add a dquot to the tail of the free list. Caller holds dq_list_lock. */
static inline void put_dquot_last(struct dquot *dquot)
{
	list_add_tail(&dquot->dq_free, &free_dquots);
	dqstats_inc(DQST_FREE_DQUOTS);
}
/*
 * Take a dquot off the free list (no-op if it isn't on it).
 * Caller must hold dq_list_lock.
 */
static inline void remove_free_dquot(struct dquot *dquot)
{
	if (list_empty(&dquot->dq_free))
		return;
	list_del_init(&dquot->dq_free);
	dqstats_dec(DQST_FREE_DQUOTS);
}
/* Add a dquot to the in-use list. Caller must hold dq_list_lock. */
static inline void put_inuse(struct dquot *dquot)
{
	/* We add to the back of inuse list so we don't have to restart
	 * when traversing this list and we block */
	list_add_tail(&dquot->dq_inuse, &inuse_list);
	dqstats_inc(DQST_ALLOC_DQUOTS);
}
/* Remove a dquot from the in-use list. Caller must hold dq_list_lock. */
static inline void remove_inuse(struct dquot *dquot)
{
	dqstats_dec(DQST_ALLOC_DQUOTS);
	list_del(&dquot->dq_inuse);
}
/*
* End of list functions needing dq_list_lock
*/
/*
 * Wait for any in-flight dquot_acquire()/dquot_release() on @dquot to
 * finish: taking dq_lock serializes us behind the current holder, and we
 * release it immediately since we only needed the rendezvous.
 */
static void wait_on_dquot(struct dquot *dquot)
{
	mutex_lock(&dquot->dq_lock);
	mutex_unlock(&dquot->dq_lock);
}
2008-04-28 13:14:32 +04:00
/* Does this dquot have unwritten (in-memory) modifications? */
static inline int dquot_dirty(struct dquot *dquot)
{
	return test_bit(DQ_MOD_B, &dquot->dq_flags);
}
/* Dispatch dirtying to the filesystem's ->mark_dirty method. */
static inline int mark_dquot_dirty(struct dquot *dquot)
{
	return dquot->dq_sb->dq_op->mark_dirty(dquot);
}
2005-04-17 02:20:36 +04:00
2010-03-27 15:15:38 +03:00
/* Mark dquot dirty in atomic manner, and return its old dirty flag state */
int dquot_mark_dquot_dirty(struct dquot *dquot)
{
	int ret = 1;

	/* Inactive dquots (failed init/read) must not end up on dirty lists */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		return 0;

	/* Formats without a dirty list track state solely via DQ_MOD_B */
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);

	/* If quota is dirty already, we don't have to acquire dq_list_lock */
	if (test_bit(DQ_MOD_B, &dquot->dq_flags))
		return 1;

	spin_lock(&dq_list_lock);
	if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
		list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
				info[dquot->dq_id.type].dqi_dirty_list);
		ret = 0;
	}
	spin_unlock(&dq_list_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_mark_dquot_dirty);
2005-04-17 02:20:36 +04:00
2009-12-14 15:21:15 +03:00
/*
 * Dirty every dquot in the MAXQUOTAS-sized array @dquot (NULL slots are
 * skipped).  This can block when journalling.  Returns the first error
 * encountered, but always processes all slots - even after a failure.
 */
static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
{
	int cnt, rc, first_err = 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquot[cnt])
			continue;
		/* Even in case of error we have to continue */
		rc = mark_dquot_dirty(dquot[cnt]);
		if (!first_err)
			first_err = rc;
	}
	return first_err;
}
/*
 * Drop one reference on each dquot in the MAXQUOTAS-sized array.
 * dqput() tolerates NULL entries, so no per-slot check is needed.
 */
static inline void dqput_all(struct dquot **dquot)
{
	unsigned int i;

	for (i = 0; i < MAXQUOTAS; i++)
		dqput(dquot[i]);
}
2005-04-17 02:20:36 +04:00
/*
 * Clear the dquot's dirty state.  Returns 1 if the dquot was dirty (and,
 * for list-tracked formats, has now been removed from its per-sb dirty
 * list), 0 if it was already clean.
 */
static inline int clear_dquot_dirty(struct dquot *dquot)
{
	/* Formats without a dirty list track state solely via DQ_MOD_B */
	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
		return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);

	spin_lock(&dq_list_lock);
	if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		return 0;
	}
	list_del_init(&dquot->dq_dirty);
	spin_unlock(&dq_list_lock);
	return 1;
}
/*
 * Mark the mem_dqinfo of quota @type on @sb as needing write-out.
 * dqi_flags is guarded by dq_data_lock (see the locking comment above).
 */
void mark_info_dirty(struct super_block *sb, int type)
{
	spin_lock(&dq_data_lock);
	sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
	spin_unlock(&dq_data_lock);
}
EXPORT_SYMBOL(mark_info_dirty);
/*
 * Read dquot from disk and alloc space for it
 *
 * Called on first dqget() of a dquot.  Under dq_lock: reads the on-disk
 * block (unless already read), then - if the dquot has no on-disk location
 * yet - commits it so space is allocated, writing the file info as well if
 * it is dirty.  Sets DQ_READ_B/DQ_ACTIVE_B with barriers so lockless
 * readers in dqget() observe the flags only after the data is in place.
 * Returns 0 or a negative errno.
 */
int dquot_acquire(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	if (!test_bit(DQ_READ_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
	if (ret < 0)
		goto out_iolock;
	/* Make sure flags update is visible after dquot has been filled */
	smp_mb__before_atomic();
	set_bit(DQ_READ_B, &dquot->dq_flags);
	/* Instantiate dquot if needed */
	if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
		/* Write the info if needed */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
					dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret < 0)
			goto out_iolock;
		if (ret2 < 0) {
			ret = ret2;
			goto out_iolock;
		}
	}
	/*
	 * Make sure flags update is visible after on-disk struct has been
	 * allocated. Paired with smp_rmb() in dqget().
	 */
	smp_mb__before_atomic();
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_iolock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_acquire);
2005-04-17 02:20:36 +04:00
/*
 * Write dquot to disk
 *
 * Under dq_lock: if the dquot is still dirty, clear the dirty state and
 * write the block via the format's commit_dqblk.  Returns 0 if there was
 * nothing to write or the write succeeded; -EIO for an inactive dquot
 * (which can only exist after a failed read/init) or the format's error.
 */
int dquot_commit(struct dquot *dquot)
{
	int ret = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Someone else may have already written it out meanwhile */
	if (!clear_dquot_dirty(dquot))
		goto out_lock;
	/* Inactive dquot can be only if there was error during read/init
	 * => we have better not writing it */
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
		ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
	else
		ret = -EIO;
out_lock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_commit);
2005-04-17 02:20:36 +04:00
/*
 * Release dquot
 *
 * Called on the last dqput().  Under dq_lock: if no other dqget() has
 * raced in (dq_count > 1), release the on-disk block via the format's
 * release_dqblk (writing the file info too if dirty) and clear
 * DQ_ACTIVE_B so the dquot must be re-acquired before further use.
 */
int dquot_release(struct dquot *dquot)
{
	int ret = 0, ret2 = 0;
	struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);

	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out_dqlock;
	if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
		ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
		/* Write the info */
		if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
			ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
						dquot->dq_sb, dquot->dq_id.type);
		}
		if (ret >= 0)
			ret = ret2;
	}
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_dqlock:
	mutex_unlock(&dquot->dq_lock);
	return ret;
}
EXPORT_SYMBOL(dquot_release);
2005-04-17 02:20:36 +04:00
2008-11-25 17:31:32 +03:00
/* Default ->destroy_dquot: free a dquot allocated by dquot_alloc(). */
void dquot_destroy(struct dquot *dquot)
{
	kmem_cache_free(dquot_cachep, dquot);
}
EXPORT_SYMBOL(dquot_destroy);
2008-08-19 16:51:22 +04:00
/* Destroy a dquot via the filesystem's ->destroy_dquot method. */
static inline void do_destroy_dquot(struct dquot *dquot)
{
	dquot->dq_sb->dq_op->destroy_dquot(dquot);
}
2005-04-17 02:20:36 +04:00
/* Invalidate all dquots on the list. Note that this function is called
 * after quota is disabled and pointers from inodes removed, so there cannot
 * be new quota users. There can still be some users of quotas due to inodes
 * being just deleted or pruned by prune_icache() (those are not attached to
 * any list) or a parallel quotactl call. We have to wait for such users.
 */
/*
 * Free every cached dquot of quota @type on superblock @sb.
 * If a dquot still has users we take a reference, drop dq_list_lock,
 * sleep until dqput() drops the count to 1 (our reference), and restart
 * the scan, since the list may have changed while we slept.
 */
static void invalidate_dquots(struct super_block *sb, int type)
{
	struct dquot *dquot, *tmp;

restart:
	spin_lock(&dq_list_lock);
	list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
		if (dquot->dq_sb != sb)
			continue;
		if (dquot->dq_id.type != type)
			continue;
		/* Wait for dquot users */
		if (atomic_read(&dquot->dq_count)) {
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			/*
			 * Once dqput() wakes us up, we know it's time to free
			 * the dquot.
			 * IMPORTANT: we rely on the fact that there is always
			 * at most one process waiting for dquot to free.
			 * Otherwise dq_count would be > 1 and we would never
			 * wake up.
			 */
			wait_event(dquot_ref_wq,
				   atomic_read(&dquot->dq_count) == 1);
			dqput(dquot);
			/* At this moment dquot() need not exist (it could be
			 * reclaimed by prune_dqcache(). Hence we must
			 * restart. */
			goto restart;
		}
		/*
		 * Quota now has no users and it has been written on last
		 * dqput()
		 */
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
	}
	spin_unlock(&dq_list_lock);
}
2008-10-20 19:05:00 +04:00
/*
 * Call callback for every active dquot on given filesystem.
 *
 * @fn is invoked outside dq_list_lock; a reference on the current dquot
 * keeps it on the inuse list so the list walk can safely resume.  The
 * previous dquot's reference is dropped only after the next one is taken
 * (old_dquot), which also keeps a dqput() from recursing into write-out
 * while we hold the lock.  Stops and returns @fn's value on first error.
 */
int dquot_scan_active(struct super_block *sb,
		      int (*fn)(struct dquot *dquot, unsigned long priv),
		      unsigned long priv)
{
	struct dquot *dquot, *old_dquot = NULL;
	int ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&dq_list_lock);
	list_for_each_entry(dquot, &inuse_list, dq_inuse) {
		if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
			continue;
		if (dquot->dq_sb != sb)
			continue;
		/* Now we have active dquot so we can just increase use count */
		atomic_inc(&dquot->dq_count);
		spin_unlock(&dq_list_lock);
		dqstats_inc(DQST_LOOKUPS);
		dqput(old_dquot);
		old_dquot = dquot;
		/*
		 * ->release_dquot() can be racing with us. Our reference
		 * protects us from new calls to it so just wait for any
		 * outstanding call and recheck the DQ_ACTIVE_B after that.
		 */
		wait_on_dquot(dquot);
		if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
			ret = fn(dquot, priv);
			if (ret < 0)
				goto out;
		}
		spin_lock(&dq_list_lock);
		/* We are safe to continue now because our dquot could not
		 * be moved out of the inuse list while we hold the reference */
	}
	spin_unlock(&dq_list_lock);
out:
	dqput(old_dquot);
	return ret;
}
EXPORT_SYMBOL(dquot_scan_active);
2008-10-20 19:05:00 +04:00
2012-07-03 18:45:28 +04:00
/*
 * Write all dquot structures to quota files.
 *
 * @type selects a single quota type, or -1 for all types.  Drains each
 * per-type dirty list, writing every dquot via ->write_dquot (a reference
 * is grabbed under dq_list_lock so the dquot cannot vanish while we drop
 * the lock for the write), then writes any dirty per-type file info.
 * Returns 0 or the first write error encountered (later entries are still
 * attempted).
 */
int dquot_writeback_dquots(struct super_block *sb, int type)
{
	struct list_head *dirty;
	struct dquot *dquot;
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int err, ret = 0;

	WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		spin_lock(&dq_list_lock);
		dirty = &dqopt->info[cnt].dqi_dirty_list;
		while (!list_empty(dirty)) {
			dquot = list_first_entry(dirty, struct dquot,
						 dq_dirty);

			WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));

			/* Now we have active dquot from which someone is
			 * holding reference so we can safely just increase
			 * use count */
			dqgrab(dquot);
			spin_unlock(&dq_list_lock);
			dqstats_inc(DQST_LOOKUPS);
			err = sb->dq_op->write_dquot(dquot);
			if (!ret && err)
				ret = err;
			dqput(dquot);
			spin_lock(&dq_list_lock);
		}
		spin_unlock(&dq_list_lock);
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
		    && info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
	dqstats_inc(DQST_SYNCS);

	return ret;
}
EXPORT_SYMBOL(dquot_writeback_dquots);
/*
 * Write all dquot structures to disk and make them visible from userspace.
 *
 * After dquot_writeback_dquots(), filesystems that keep quota in hidden
 * system files (DQUOT_QUOTA_SYS_FILE) are done.  Otherwise we sync the
 * filesystem and block device and then truncate the quota files' page
 * cache so userspace reads see the freshly written data.
 */
int dquot_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	int cnt;
	int ret;

	ret = dquot_writeback_dquots(sb, type);
	if (ret)
		return ret;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		return 0;

	/* This is not very clever (and fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for userspace to be visible... */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;
		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}

	return 0;
}
EXPORT_SYMBOL(dquot_quota_sync);
2005-04-17 02:20:36 +04:00
2013-08-28 04:18:09 +04:00
/*
 * Shrinker scan callback: free up to sc->nr_to_scan unused dquots from
 * the tail (oldest end) of the free list.  Returns the number destroyed.
 */
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct list_head *head;
	struct dquot *dquot;
	unsigned long freed = 0;

	spin_lock(&dq_list_lock);
	head = free_dquots.prev;
	while (head != &free_dquots && sc->nr_to_scan) {
		dquot = list_entry(head, struct dquot, dq_free);
		remove_dquot_hash(dquot);
		remove_free_dquot(dquot);
		remove_inuse(dquot);
		do_destroy_dquot(dquot);
		sc->nr_to_scan--;
		freed++;
		/* Re-read the tail: the list shrank under us */
		head = free_dquots.prev;
	}
	spin_unlock(&dq_list_lock);
	return freed;
}
2013-08-28 04:18:09 +04:00
/*
 * Shrinker count callback: estimate reclaimable objects from the
 * free-dquot statistics counter, scaled by the VFS pressure ratio.
 */
static unsigned long
dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return vfs_pressure_ratio(
	percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
}
2007-07-17 15:03:17 +04:00
/* Memory-pressure hook that trims the dquot cache via the two callbacks above. */
static struct shrinker dqcache_shrinker = {
	.count_objects = dqcache_shrink_count,
	.scan_objects = dqcache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
2005-04-17 02:20:36 +04:00
/*
 * Put reference to dquot
 *
 * Drops one reference.  On the last reference the dquot is first written
 * out if dirty, then released via ->release_dquot (both of which sleep, so
 * we loop back to we_slept and re-check under dq_list_lock), and finally
 * parked on the free list for reuse or shrinker reclaim.  Also wakes a
 * quotaoff-time invalidate_dquots() waiter when our drop leaves only its
 * reference.  NULL is tolerated as a no-op.
 */
void dqput(struct dquot *dquot)
{
	int ret;

	if (!dquot)
		return;
#ifdef CONFIG_QUOTA_DEBUG
	if (!atomic_read(&dquot->dq_count)) {
		quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
			    quotatypes[dquot->dq_id.type],
			    from_kqid(&init_user_ns, dquot->dq_id));
		BUG();
	}
#endif
	dqstats_inc(DQST_DROPS);
we_slept:
	spin_lock(&dq_list_lock);
	if (atomic_read(&dquot->dq_count) > 1) {
		/* We have more than one user... nothing to do */
		atomic_dec(&dquot->dq_count);
		/* Releasing dquot during quotaoff phase? */
		if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
		    atomic_read(&dquot->dq_count) == 1)
			wake_up(&dquot_ref_wq);
		spin_unlock(&dq_list_lock);
		return;
	}
	/* Need to release dquot? */
	if (dquot_dirty(dquot)) {
		spin_unlock(&dq_list_lock);
		/* Commit dquot before releasing */
		ret = dquot->dq_sb->dq_op->write_dquot(dquot);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't write quota structure"
				    " (error %d). Quota may get out of sync!",
				    ret);
			/*
			 * We clear dirty bit anyway, so that we avoid
			 * infinite loop here
			 */
			clear_dquot_dirty(dquot);
		}
		goto we_slept;
	}
	if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
		spin_unlock(&dq_list_lock);
		dquot->dq_sb->dq_op->release_dquot(dquot);
		goto we_slept;
	}
	atomic_dec(&dquot->dq_count);
#ifdef CONFIG_QUOTA_DEBUG
	/* sanity check */
	BUG_ON(!list_empty(&dquot->dq_free));
#endif
	put_dquot_last(dquot);
	spin_unlock(&dq_list_lock);
}
EXPORT_SYMBOL(dqput);
2005-04-17 02:20:36 +04:00
2008-11-25 17:31:32 +03:00
/*
 * Default ->alloc_dquot: zeroed allocation from the dquot slab.
 * GFP_NOFS because allocation can happen from fs write-out paths.
 */
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
	return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
}
EXPORT_SYMBOL(dquot_alloc);
2008-08-19 16:51:22 +04:00
2005-04-17 02:20:36 +04:00
static struct dquot * get_empty_dquot ( struct super_block * sb , int type )
{
struct dquot * dquot ;
2008-08-19 16:51:22 +04:00
dquot = sb - > dq_op - > alloc_dquot ( sb , type ) ;
2005-04-17 02:20:36 +04:00
if ( ! dquot )
2009-01-26 18:01:43 +03:00
return NULL ;
2005-04-17 02:20:36 +04:00
2006-03-23 14:00:29 +03:00
mutex_init ( & dquot - > dq_lock ) ;
2005-04-17 02:20:36 +04:00
INIT_LIST_HEAD ( & dquot - > dq_free ) ;
INIT_LIST_HEAD ( & dquot - > dq_inuse ) ;
INIT_HLIST_NODE ( & dquot - > dq_hash ) ;
INIT_LIST_HEAD ( & dquot - > dq_dirty ) ;
dquot - > dq_sb = sb ;
2012-09-16 16:45:30 +04:00
dquot - > dq_id = make_kqid_invalid ( type ) ;
2005-04-17 02:20:36 +04:00
atomic_set ( & dquot - > dq_count , 1 ) ;
return dquot ;
}
/*
* Get reference to dquot
2009-01-12 19:23:05 +03:00
*
* Locking is slightly tricky here . We are guarded from parallel quotaoff ( )
* destroying our dquot by :
* a ) checking for quota flags under dq_list_lock and
* b ) getting a reference to dquot before we release dq_list_lock
2005-04-17 02:20:36 +04:00
*/
2012-09-16 14:11:50 +04:00
struct dquot * dqget ( struct super_block * sb , struct kqid qid )
2005-04-17 02:20:36 +04:00
{
2012-09-16 16:45:30 +04:00
unsigned int hashent = hashfn ( sb , qid ) ;
2015-06-24 19:07:02 +03:00
struct dquot * dquot , * empty = NULL ;
2005-04-17 02:20:36 +04:00
2016-07-01 00:31:01 +03:00
if ( ! qid_has_mapping ( sb - > s_user_ns , qid ) )
return ERR_PTR ( - EINVAL ) ;
2012-09-16 16:45:30 +04:00
if ( ! sb_has_quota_active ( sb , qid . type ) )
2015-06-24 19:07:02 +03:00
return ERR_PTR ( - ESRCH ) ;
2005-04-17 02:20:36 +04:00
we_slept :
spin_lock ( & dq_list_lock ) ;
2009-01-12 19:23:05 +03:00
spin_lock ( & dq_state_lock ) ;
2012-09-16 16:45:30 +04:00
if ( ! sb_has_quota_active ( sb , qid . type ) ) {
2009-01-12 19:23:05 +03:00
spin_unlock ( & dq_state_lock ) ;
spin_unlock ( & dq_list_lock ) ;
2015-06-24 19:07:02 +03:00
dquot = ERR_PTR ( - ESRCH ) ;
2009-01-12 19:23:05 +03:00
goto out ;
}
spin_unlock ( & dq_state_lock ) ;
2012-09-16 16:45:30 +04:00
dquot = find_dquot ( hashent , sb , qid ) ;
2009-01-26 18:01:43 +03:00
if ( ! dquot ) {
if ( ! empty ) {
2005-04-17 02:20:36 +04:00
spin_unlock ( & dq_list_lock ) ;
2012-09-16 16:45:30 +04:00
empty = get_empty_dquot ( sb , qid . type ) ;
2009-01-26 18:01:43 +03:00
if ( ! empty )
2005-04-17 02:20:36 +04:00
schedule ( ) ; /* Try to wait for a moment... */
goto we_slept ;
}
dquot = empty ;
2009-01-26 18:01:43 +03:00
empty = NULL ;
2012-09-16 14:56:19 +04:00
dquot - > dq_id = qid ;
2005-04-17 02:20:36 +04:00
/* all dquots go on the inuse_list */
put_inuse ( dquot ) ;
/* hash it first so it can be found */
insert_dquot_hash ( dquot ) ;
spin_unlock ( & dq_list_lock ) ;
2010-04-26 20:03:33 +04:00
dqstats_inc ( DQST_LOOKUPS ) ;
2005-04-17 02:20:36 +04:00
} else {
if ( ! atomic_read ( & dquot - > dq_count ) )
remove_free_dquot ( dquot ) ;
atomic_inc ( & dquot - > dq_count ) ;
spin_unlock ( & dq_list_lock ) ;
2010-04-26 20:03:33 +04:00
dqstats_inc ( DQST_CACHE_HITS ) ;
dqstats_inc ( DQST_LOOKUPS ) ;
2005-04-17 02:20:36 +04:00
}
2009-01-27 17:47:22 +03:00
/* Wait for dq_lock - after this we know that either dquot_release() is
* already finished or it will be canceled due to dq_count > 1 test */
2005-04-17 02:20:36 +04:00
wait_on_dquot ( dquot ) ;
2009-01-27 17:47:22 +03:00
/* Read the dquot / allocate space in quota file */
2015-06-24 19:07:02 +03:00
if ( ! test_bit ( DQ_ACTIVE_B , & dquot - > dq_flags ) ) {
int err ;
err = sb - > dq_op - > acquire_dquot ( dquot ) ;
if ( err < 0 ) {
dqput ( dquot ) ;
dquot = ERR_PTR ( err ) ;
goto out ;
}
2005-04-17 02:20:36 +04:00
}
2016-02-18 15:20:20 +03:00
/*
* Make sure following reads see filled structure - paired with
* smp_mb__before_atomic ( ) in dquot_acquire ( ) .
*/
smp_rmb ( ) ;
2010-04-19 18:47:20 +04:00
# ifdef CONFIG_QUOTA_DEBUG
2006-04-02 15:36:13 +04:00
BUG_ON ( ! dquot - > dq_sb ) ; /* Has somebody invalidated entry under us? */
2005-04-17 02:20:36 +04:00
# endif
2009-01-12 19:23:05 +03:00
out :
if ( empty )
do_destroy_dquot ( empty ) ;
2005-04-17 02:20:36 +04:00
return dquot ;
}
2009-01-14 18:19:32 +03:00
EXPORT_SYMBOL ( dqget ) ;
2005-04-17 02:20:36 +04:00
2014-09-25 18:36:14 +04:00
static inline struct dquot * * i_dquot ( struct inode * inode )
{
return inode - > i_sb - > s_op - > get_dquots ( inode ) ;
}
2005-04-17 02:20:36 +04:00
static int dqinit_needed ( struct inode * inode , int type )
{
2015-02-12 12:36:37 +03:00
struct dquot * const * dquots ;
2005-04-17 02:20:36 +04:00
int cnt ;
if ( IS_NOQUOTA ( inode ) )
return 0 ;
2015-02-12 12:36:37 +03:00
dquots = i_dquot ( inode ) ;
2005-04-17 02:20:36 +04:00
if ( type ! = - 1 )
2015-02-12 12:36:37 +03:00
return ! dquots [ type ] ;
2005-04-17 02:20:36 +04:00
for ( cnt = 0 ; cnt < MAXQUOTAS ; cnt + + )
2015-02-12 12:36:37 +03:00
if ( ! dquots [ cnt ] )
2005-04-17 02:20:36 +04:00
return 1 ;
return 0 ;
}
2016-11-23 16:04:55 +03:00
/* This routine is guarded by s_umount semaphore */
2005-04-17 02:20:36 +04:00
static void add_dquot_ref ( struct super_block * sb , int type )
{
2008-02-06 12:37:36 +03:00
struct inode * inode , * old_inode = NULL ;
2010-04-19 18:47:20 +04:00
# ifdef CONFIG_QUOTA_DEBUG
2010-02-09 20:20:39 +03:00
int reserved = 0 ;
2010-04-06 20:52:47 +04:00
# endif
2005-04-17 02:20:36 +04:00
2015-03-04 20:37:22 +03:00
spin_lock ( & sb - > s_inode_list_lock ) ;
2007-02-12 11:51:58 +03:00
list_for_each_entry ( inode , & sb - > s_inodes , i_sb_list ) {
2011-03-22 14:23:36 +03:00
spin_lock ( & inode - > i_lock ) ;
if ( ( inode - > i_state & ( I_FREEING | I_WILL_FREE | I_NEW ) ) | |
! atomic_read ( & inode - > i_writecount ) | |
! dqinit_needed ( inode , type ) ) {
spin_unlock ( & inode - > i_lock ) ;
2009-03-11 23:17:36 +03:00
continue ;
2011-03-22 14:23:36 +03:00
}
2007-02-12 11:51:58 +03:00
__iget ( inode ) ;
2011-03-22 14:23:36 +03:00
spin_unlock ( & inode - > i_lock ) ;
2015-03-04 20:37:22 +03:00
spin_unlock ( & sb - > s_inode_list_lock ) ;
2007-02-12 11:51:58 +03:00
2012-04-24 19:08:41 +04:00
# ifdef CONFIG_QUOTA_DEBUG
if ( unlikely ( inode_get_rsv_space ( inode ) > 0 ) )
reserved = 1 ;
# endif
2008-02-06 12:37:36 +03:00
iput ( old_inode ) ;
2010-03-03 17:05:07 +03:00
__dquot_initialize ( inode , type ) ;
2011-03-22 14:23:40 +03:00
/*
* We hold a reference to ' inode ' so it couldn ' t have been
* removed from s_inodes list while we dropped the
2015-03-04 20:37:22 +03:00
* s_inode_list_lock . We cannot iput the inode now as we can be
2011-03-22 14:23:40 +03:00
* holding the last reference and we cannot iput it under
2015-03-04 20:37:22 +03:00
* s_inode_list_lock . So we keep the reference and iput it
2011-03-22 14:23:40 +03:00
* later .
*/
2008-02-06 12:37:36 +03:00
old_inode = inode ;
2015-03-04 20:37:22 +03:00
spin_lock ( & sb - > s_inode_list_lock ) ;
2005-04-17 02:20:36 +04:00
}
2015-03-04 20:37:22 +03:00
spin_unlock ( & sb - > s_inode_list_lock ) ;
2008-02-06 12:37:36 +03:00
iput ( old_inode ) ;
2010-02-09 20:20:39 +03:00
2010-04-19 18:47:20 +04:00
# ifdef CONFIG_QUOTA_DEBUG
2010-02-09 20:20:39 +03:00
if ( reserved ) {
2010-07-20 18:54:43 +04:00
quota_error ( sb , " Writes happened before quota was turned on "
" thus quota information is probably inconsistent. "
" Please run quotacheck(8) " ) ;
2010-02-09 20:20:39 +03:00
}
2010-04-06 20:52:47 +04:00
# endif
2005-04-17 02:20:36 +04:00
}
2009-01-27 17:47:22 +03:00
/*
* Remove references to dquots from inode and add dquot to list for freeing
2011-03-31 05:57:33 +04:00
* if we have the last reference to dquot
2009-01-27 17:47:22 +03:00
*/
2014-06-04 08:21:30 +04:00
static void remove_inode_dquot_ref ( struct inode * inode , int type ,
struct list_head * tofree_head )
2005-04-17 02:20:36 +04:00
{
2015-02-12 12:36:37 +03:00
struct dquot * * dquots = i_dquot ( inode ) ;
struct dquot * dquot = dquots [ type ] ;
2005-04-17 02:20:36 +04:00
2014-06-04 08:21:30 +04:00
if ( ! dquot )
return ;
2015-02-12 12:36:37 +03:00
dquots [ type ] = NULL ;
2014-06-04 08:21:30 +04:00
if ( list_empty ( & dquot - > dq_free ) ) {
/*
* The inode still has reference to dquot so it can ' t be in the
* free list
*/
spin_lock ( & dq_list_lock ) ;
list_add ( & dquot - > dq_free , tofree_head ) ;
spin_unlock ( & dq_list_lock ) ;
} else {
/*
* Dquot is already in a list to put so we won ' t drop the last
* reference here .
*/
dqput ( dquot ) ;
2005-04-17 02:20:36 +04:00
}
}
2009-01-27 17:47:22 +03:00
/*
* Free list of dquots
* Dquots are removed from inodes and no new references can be got so we are
* the only ones holding reference
*/
2005-04-17 02:20:36 +04:00
static void put_dquot_list ( struct list_head * tofree_head )
{
struct list_head * act_head ;
struct dquot * dquot ;
act_head = tofree_head - > next ;
while ( act_head ! = tofree_head ) {
dquot = list_entry ( act_head , struct dquot , dq_free ) ;
act_head = act_head - > next ;
2009-01-27 17:47:22 +03:00
/* Remove dquot from the list so we won't have problems... */
list_del_init ( & dquot - > dq_free ) ;
2005-04-17 02:20:36 +04:00
dqput ( dquot ) ;
}
}
2007-02-12 11:51:57 +03:00
static void remove_dquot_ref ( struct super_block * sb , int type ,
struct list_head * tofree_head )
{
struct inode * inode ;
2010-06-01 11:39:48 +04:00
int reserved = 0 ;
2007-02-12 11:51:57 +03:00
2015-03-04 20:37:22 +03:00
spin_lock ( & sb - > s_inode_list_lock ) ;
2007-02-12 11:51:57 +03:00
list_for_each_entry ( inode , & sb - > s_inodes , i_sb_list ) {
2009-03-11 23:17:36 +03:00
/*
* We have to scan also I_NEW inodes because they can already
* have quota pointer initialized . Luckily , we need to touch
* only quota pointers and these have separate locking
2014-06-04 08:23:19 +04:00
* ( dq_data_lock ) .
2009-03-11 23:17:36 +03:00
*/
2014-06-04 08:23:19 +04:00
spin_lock ( & dq_data_lock ) ;
2010-06-01 11:39:48 +04:00
if ( ! IS_NOQUOTA ( inode ) ) {
if ( unlikely ( inode_get_rsv_space ( inode ) > 0 ) )
reserved = 1 ;
2007-02-12 11:51:57 +03:00
remove_inode_dquot_ref ( inode , type , tofree_head ) ;
2010-06-01 11:39:48 +04:00
}
2014-06-04 08:23:19 +04:00
spin_unlock ( & dq_data_lock ) ;
2007-02-12 11:51:57 +03:00
}
2015-03-04 20:37:22 +03:00
spin_unlock ( & sb - > s_inode_list_lock ) ;
2010-06-01 11:39:48 +04:00
# ifdef CONFIG_QUOTA_DEBUG
if ( reserved ) {
printk ( KERN_WARNING " VFS (%s): Writes happened after quota "
" was disabled thus quota information is probably "
" inconsistent. Please run quotacheck(8). \n " , sb - > s_id ) ;
}
# endif
2007-02-12 11:51:57 +03:00
}
2005-04-17 02:20:36 +04:00
/* Gather all references from inodes and drop them */
static void drop_dquot_ref ( struct super_block * sb , int type )
{
LIST_HEAD ( tofree_head ) ;
2007-02-12 11:51:57 +03:00
if ( sb - > dq_op ) {
remove_dquot_ref ( sb , type , & tofree_head ) ;
2014-06-04 08:23:19 +04:00
synchronize_srcu ( & dquot_srcu ) ;
2007-02-12 11:51:57 +03:00
put_dquot_list ( & tofree_head ) ;
}
2005-04-17 02:20:36 +04:00
}
2008-08-20 16:45:12 +04:00
/* Charge @number inodes to @dquot.  Caller holds dq_data_lock. */
static inline void dquot_incr_inodes(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curinodes += number;
}

/* Charge @number bytes of space to @dquot.  Caller holds dq_data_lock. */
static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_curspace += number;
}

/* Reserve @number bytes against @dquot without charging them yet. */
static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
{
	dquot->dq_dqb.dqb_rsvspace += number;
}
2009-01-13 18:43:14 +03:00
/*
* Claim reserved quota space
*/
2010-02-09 20:20:39 +03:00
static void dquot_claim_reserved_space ( struct dquot * dquot , qsize_t number )
2009-01-13 18:43:14 +03:00
{
2010-02-09 20:20:39 +03:00
if ( dquot - > dq_dqb . dqb_rsvspace < number ) {
WARN_ON_ONCE ( 1 ) ;
number = dquot - > dq_dqb . dqb_rsvspace ;
}
2009-01-13 18:43:14 +03:00
dquot - > dq_dqb . dqb_curspace + = number ;
dquot - > dq_dqb . dqb_rsvspace - = number ;
}
2013-08-17 17:32:32 +04:00
/*
 * Opposite of dquot_claim_reserved_space(): move @number bytes of real
 * usage back into the reservation, clamping to current usage.
 */
static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;

	if (WARN_ON_ONCE(dqb->dqb_curspace < number))
		number = dqb->dqb_curspace;
	dqb->dqb_rsvspace += number;
	dqb->dqb_curspace -= number;
}
/*
 * Release @number bytes of reservation from @dquot, clamping at zero
 * (with a one-time warning) if the reservation would underflow.
 */
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;

	if (WARN_ON_ONCE(dqb->dqb_rsvspace < number))
		number = dqb->dqb_rsvspace;
	dqb->dqb_rsvspace -= number;
}
/*
 * Uncharge @number inodes from @dquot.  Usage is clamped at zero unless
 * the filesystem allows negative usage (DQUOT_NEGATIVE_USAGE).  Dropping
 * back under the soft limit clears the grace time and the "inode limit
 * crossed" warning flag.
 */
static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dqb->dqb_curinodes >= number)
		dqb->dqb_curinodes -= number;
	else
		dqb->dqb_curinodes = 0;
	if (dqb->dqb_curinodes <= dqb->dqb_isoftlimit)
		dqb->dqb_itime = (time64_t) 0;
	clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
/*
 * Uncharge @number bytes of space from @dquot.  Mirrors
 * dquot_decr_inodes(): clamp at zero unless negative usage is allowed,
 * and reset the block grace time / warning flag when back under the
 * soft limit.
 */
static void dquot_decr_space(struct dquot *dquot, qsize_t number)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;

	if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
	    dqb->dqb_curspace >= number)
		dqb->dqb_curspace -= number;
	else
		dqb->dqb_curspace = 0;
	if (dqb->dqb_curspace <= dqb->dqb_bsoftlimit)
		dqb->dqb_btime = (time64_t) 0;
	clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
struct dquot_warn {
struct super_block * w_sb ;
2012-09-16 15:05:34 +04:00
struct kqid w_dq_id ;
2012-02-14 16:28:01 +04:00
short w_type ;
} ;
2007-12-23 01:03:25 +03:00
static int warning_issued ( struct dquot * dquot , const int warntype )
{
int flag = ( warntype = = QUOTA_NL_BHARDWARN | |
warntype = = QUOTA_NL_BSOFTLONGWARN ) ? DQ_BLKS_B :
( ( warntype = = QUOTA_NL_IHARDWARN | |
warntype = = QUOTA_NL_ISOFTLONGWARN ) ? DQ_INODES_B : 0 ) ;
if ( ! flag )
return 0 ;
return test_and_set_bit ( flag , & dquot - > dq_flags ) ;
}
2007-10-17 10:29:31 +04:00
#ifdef CONFIG_PRINT_QUOTA_WARNING
/* Runtime switch (sysctl) controlling whether warnings go to the tty. */
static int flag_print_warnings = 1;

/*
 * Should this warning be printed to the current task's tty?  Only when
 * printing is enabled and the current task actually owns the id the
 * warning is about (project warnings are always shown).
 */
static int need_print_warning(struct dquot_warn *warn)
{
	if (!flag_print_warnings)
		return 0;

	switch (warn->w_dq_id.type) {
	case USRQUOTA:
		return uid_eq(current_fsuid(), warn->w_dq_id.uid);
	case GRPQUOTA:
		return in_group_p(warn->w_dq_id.gid);
	case PRJQUOTA:
		return 1;
	}
	return 0;
}
/* Print warning to user which exceeded quota */
2012-02-14 16:28:01 +04:00
static void print_warning ( struct dquot_warn * warn )
2005-04-17 02:20:36 +04:00
{
char * msg = NULL ;
2006-12-08 13:36:04 +03:00
struct tty_struct * tty ;
2012-02-14 16:28:01 +04:00
int warntype = warn - > w_type ;
2005-04-17 02:20:36 +04:00
2008-07-25 12:46:52 +04:00
if ( warntype = = QUOTA_NL_IHARDBELOW | |
warntype = = QUOTA_NL_ISOFTBELOW | |
warntype = = QUOTA_NL_BHARDBELOW | |
2012-02-14 16:28:01 +04:00
warntype = = QUOTA_NL_BSOFTBELOW | | ! need_print_warning ( warn ) )
2005-04-17 02:20:36 +04:00
return ;
2006-12-08 13:36:04 +03:00
tty = get_current_tty ( ) ;
if ( ! tty )
2008-10-13 13:39:13 +04:00
return ;
2012-02-14 16:28:01 +04:00
tty_write_message ( tty , warn - > w_sb - > s_id ) ;
2007-10-17 10:29:31 +04:00
if ( warntype = = QUOTA_NL_ISOFTWARN | | warntype = = QUOTA_NL_BSOFTWARN )
2006-12-08 13:36:04 +03:00
tty_write_message ( tty , " : warning, " ) ;
2005-04-17 02:20:36 +04:00
else
2006-12-08 13:36:04 +03:00
tty_write_message ( tty , " : write failed, " ) ;
2012-09-16 15:05:34 +04:00
tty_write_message ( tty , quotatypes [ warn - > w_dq_id . type ] ) ;
2005-04-17 02:20:36 +04:00
switch ( warntype ) {
2007-10-17 10:29:31 +04:00
case QUOTA_NL_IHARDWARN :
2005-04-17 02:20:36 +04:00
msg = " file limit reached. \r \n " ;
break ;
2007-10-17 10:29:31 +04:00
case QUOTA_NL_ISOFTLONGWARN :
2005-04-17 02:20:36 +04:00
msg = " file quota exceeded too long. \r \n " ;
break ;
2007-10-17 10:29:31 +04:00
case QUOTA_NL_ISOFTWARN :
2005-04-17 02:20:36 +04:00
msg = " file quota exceeded. \r \n " ;
break ;
2007-10-17 10:29:31 +04:00
case QUOTA_NL_BHARDWARN :
2005-04-17 02:20:36 +04:00
msg = " block limit reached. \r \n " ;
break ;
2007-10-17 10:29:31 +04:00
case QUOTA_NL_BSOFTLONGWARN :
2005-04-17 02:20:36 +04:00
msg = " block quota exceeded too long. \r \n " ;
break ;
2007-10-17 10:29:31 +04:00
case QUOTA_NL_BSOFTWARN :
2005-04-17 02:20:36 +04:00
msg = " block quota exceeded. \r \n " ;
break ;
}
2006-12-08 13:36:04 +03:00
tty_write_message ( tty , msg ) ;
2008-10-13 13:39:13 +04:00
tty_kref_put ( tty ) ;
2005-04-17 02:20:36 +04:00
}
2007-10-17 10:29:31 +04:00
# endif
2012-02-14 16:28:01 +04:00
static void prepare_warning ( struct dquot_warn * warn , struct dquot * dquot ,
int warntype )
{
if ( warning_issued ( dquot , warntype ) )
return ;
warn - > w_type = warntype ;
warn - > w_sb = dquot - > dq_sb ;
warn - > w_dq_id = dquot - > dq_id ;
}
2009-01-13 18:43:09 +03:00
/*
* Write warnings to the console and send warning messages over netlink .
*
2012-02-14 16:28:01 +04:00
* Note that this function can call into tty and networking code .
2009-01-13 18:43:09 +03:00
*/
2012-02-14 16:28:01 +04:00
static void flush_warnings ( struct dquot_warn * warn )
2005-04-17 02:20:36 +04:00
{
int i ;
2009-09-28 15:35:17 +04:00
for ( i = 0 ; i < MAXQUOTAS ; i + + ) {
2012-02-14 16:28:01 +04:00
if ( warn [ i ] . w_type = = QUOTA_NL_NOWARN )
continue ;
2007-10-17 10:29:31 +04:00
# ifdef CONFIG_PRINT_QUOTA_WARNING
2012-02-14 16:28:01 +04:00
print_warning ( & warn [ i ] ) ;
2007-10-17 10:29:31 +04:00
# endif
2012-09-16 15:05:34 +04:00
quota_send_warning ( warn [ i ] . w_dq_id ,
2012-02-14 16:28:01 +04:00
warn [ i ] . w_sb - > s_dev , warn [ i ] . w_type ) ;
2009-09-28 15:35:17 +04:00
}
2005-04-17 02:20:36 +04:00
}
2009-01-27 03:47:11 +03:00
static int ignore_hardlimit ( struct dquot * dquot )
2005-04-17 02:20:36 +04:00
{
2012-09-16 14:56:19 +04:00
struct mem_dqinfo * info = & sb_dqopt ( dquot - > dq_sb ) - > info [ dquot - > dq_id . type ] ;
2005-04-17 02:20:36 +04:00
return capable ( CAP_SYS_RESOURCE ) & &
2009-01-27 17:47:22 +03:00
( info - > dqi_format - > qf_fmt_id ! = QFMT_VFS_OLD | |
2014-11-19 11:21:58 +03:00
! ( info - > dqi_flags & DQF_ROOT_SQUASH ) ) ;
2005-04-17 02:20:36 +04:00
}
/* needs dq_data_lock */
2012-02-14 16:28:01 +04:00
static int check_idq ( struct dquot * dquot , qsize_t inodes ,
struct dquot_warn * warn )
2005-04-17 02:20:36 +04:00
{
2009-01-27 17:47:22 +03:00
qsize_t newinodes = dquot - > dq_dqb . dqb_curinodes + inodes ;
2012-09-16 14:56:19 +04:00
if ( ! sb_has_quota_limits_enabled ( dquot - > dq_sb , dquot - > dq_id . type ) | |
2008-08-20 19:50:32 +04:00
test_bit ( DQ_FAKE_B , & dquot - > dq_flags ) )
2010-03-03 17:05:08 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
if ( dquot - > dq_dqb . dqb_ihardlimit & &
2009-01-27 17:47:22 +03:00
newinodes > dquot - > dq_dqb . dqb_ihardlimit & &
2005-04-17 02:20:36 +04:00
! ignore_hardlimit ( dquot ) ) {
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_IHARDWARN ) ;
2010-03-03 17:05:08 +03:00
return - EDQUOT ;
2005-04-17 02:20:36 +04:00
}
if ( dquot - > dq_dqb . dqb_isoftlimit & &
2009-01-27 17:47:22 +03:00
newinodes > dquot - > dq_dqb . dqb_isoftlimit & &
dquot - > dq_dqb . dqb_itime & &
2016-06-17 23:03:16 +03:00
ktime_get_real_seconds ( ) > = dquot - > dq_dqb . dqb_itime & &
2005-04-17 02:20:36 +04:00
! ignore_hardlimit ( dquot ) ) {
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_ISOFTLONGWARN ) ;
2010-03-03 17:05:08 +03:00
return - EDQUOT ;
2005-04-17 02:20:36 +04:00
}
if ( dquot - > dq_dqb . dqb_isoftlimit & &
2009-01-27 17:47:22 +03:00
newinodes > dquot - > dq_dqb . dqb_isoftlimit & &
2005-04-17 02:20:36 +04:00
dquot - > dq_dqb . dqb_itime = = 0 ) {
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_ISOFTWARN ) ;
2016-06-17 23:03:16 +03:00
dquot - > dq_dqb . dqb_itime = ktime_get_real_seconds ( ) +
2012-09-16 14:56:19 +04:00
sb_dqopt ( dquot - > dq_sb ) - > info [ dquot - > dq_id . type ] . dqi_igrace ;
2005-04-17 02:20:36 +04:00
}
2010-03-03 17:05:08 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
/* needs dq_data_lock */
2012-02-14 16:28:01 +04:00
static int check_bdq ( struct dquot * dquot , qsize_t space , int prealloc ,
struct dquot_warn * warn )
2005-04-17 02:20:36 +04:00
{
2009-01-13 18:43:09 +03:00
qsize_t tspace ;
2009-01-27 17:47:22 +03:00
struct super_block * sb = dquot - > dq_sb ;
2009-01-13 18:43:09 +03:00
2012-09-16 14:56:19 +04:00
if ( ! sb_has_quota_limits_enabled ( sb , dquot - > dq_id . type ) | |
2008-08-20 19:50:32 +04:00
test_bit ( DQ_FAKE_B , & dquot - > dq_flags ) )
2010-03-03 17:05:08 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
2009-01-13 18:43:09 +03:00
tspace = dquot - > dq_dqb . dqb_curspace + dquot - > dq_dqb . dqb_rsvspace
+ space ;
2005-04-17 02:20:36 +04:00
if ( dquot - > dq_dqb . dqb_bhardlimit & &
2009-01-13 18:43:09 +03:00
tspace > dquot - > dq_dqb . dqb_bhardlimit & &
2005-04-17 02:20:36 +04:00
! ignore_hardlimit ( dquot ) ) {
if ( ! prealloc )
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_BHARDWARN ) ;
2010-03-03 17:05:08 +03:00
return - EDQUOT ;
2005-04-17 02:20:36 +04:00
}
if ( dquot - > dq_dqb . dqb_bsoftlimit & &
2009-01-13 18:43:09 +03:00
tspace > dquot - > dq_dqb . dqb_bsoftlimit & &
2009-01-27 17:47:22 +03:00
dquot - > dq_dqb . dqb_btime & &
2016-06-17 23:03:16 +03:00
ktime_get_real_seconds ( ) > = dquot - > dq_dqb . dqb_btime & &
2005-04-17 02:20:36 +04:00
! ignore_hardlimit ( dquot ) ) {
if ( ! prealloc )
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_BSOFTLONGWARN ) ;
2010-03-03 17:05:08 +03:00
return - EDQUOT ;
2005-04-17 02:20:36 +04:00
}
if ( dquot - > dq_dqb . dqb_bsoftlimit & &
2009-01-13 18:43:09 +03:00
tspace > dquot - > dq_dqb . dqb_bsoftlimit & &
2005-04-17 02:20:36 +04:00
dquot - > dq_dqb . dqb_btime = = 0 ) {
if ( ! prealloc ) {
2012-02-14 16:28:01 +04:00
prepare_warning ( warn , dquot , QUOTA_NL_BSOFTWARN ) ;
2016-06-17 23:03:16 +03:00
dquot - > dq_dqb . dqb_btime = ktime_get_real_seconds ( ) +
2012-09-16 14:56:19 +04:00
sb_dqopt ( sb ) - > info [ dquot - > dq_id . type ] . dqi_bgrace ;
2005-04-17 02:20:36 +04:00
}
else
/*
* We don ' t allow preallocation to exceed softlimit so exceeding will
* be always printed
*/
2010-03-03 17:05:08 +03:00
return - EDQUOT ;
2005-04-17 02:20:36 +04:00
}
2010-03-03 17:05:08 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2008-08-20 16:45:12 +04:00
/*
 * Classify the effect of freeing @inodes inodes from @dquot: returns the
 * QUOTA_NL_* "dropped below limit" event to report, or QUOTA_NL_NOWARN.
 */
static int info_idq_free(struct dquot *dquot, qsize_t inodes)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;
	qsize_t newinodes;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dqb->dqb_curinodes <= dqb->dqb_isoftlimit ||
	    !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
		return QUOTA_NL_NOWARN;

	newinodes = dqb->dqb_curinodes - inodes;
	if (newinodes <= dqb->dqb_isoftlimit)
		return QUOTA_NL_ISOFTBELOW;
	if (dqb->dqb_curinodes >= dqb->dqb_ihardlimit &&
	    newinodes < dqb->dqb_ihardlimit)
		return QUOTA_NL_IHARDBELOW;
	return QUOTA_NL_NOWARN;
}
/*
 * Classify the effect of freeing @space bytes from @dquot: returns the
 * QUOTA_NL_* "dropped below limit" event to report, or QUOTA_NL_NOWARN.
 */
static int info_bdq_free(struct dquot *dquot, qsize_t space)
{
	struct mem_dqblk *dqb = &dquot->dq_dqb;

	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
	    dqb->dqb_curspace <= dqb->dqb_bsoftlimit)
		return QUOTA_NL_NOWARN;

	if (dqb->dqb_curspace - space <= dqb->dqb_bsoftlimit)
		return QUOTA_NL_BSOFTBELOW;
	if (dqb->dqb_curspace >= dqb->dqb_bhardlimit &&
	    dqb->dqb_curspace - space < dqb->dqb_bhardlimit)
		return QUOTA_NL_BHARDBELOW;
	return QUOTA_NL_NOWARN;
}
2010-02-09 20:20:39 +03:00
2010-06-04 12:56:29 +04:00
static int dquot_active ( const struct inode * inode )
{
struct super_block * sb = inode - > i_sb ;
if ( IS_NOQUOTA ( inode ) )
return 0 ;
return sb_any_quota_loaded ( sb ) & ~ sb_any_quota_suspended ( sb ) ;
}
2005-04-17 02:20:36 +04:00
/*
2010-03-03 17:05:07 +03:00
* Initialize quota pointers in inode
*
* It is better to call this function outside of any transaction as it
* might need a lot of space in journal for dquot structure allocation .
2005-04-17 02:20:36 +04:00
*/
2015-06-24 19:07:02 +03:00
static int __dquot_initialize ( struct inode * inode , int type )
2005-04-17 02:20:36 +04:00
{
2014-06-04 08:20:30 +04:00
int cnt , init_needed = 0 ;
2016-03-03 12:54:57 +03:00
struct dquot * * dquots , * got [ MAXQUOTAS ] = { } ;
2009-01-12 19:23:05 +03:00
struct super_block * sb = inode - > i_sb ;
2010-02-09 20:20:39 +03:00
qsize_t rsv ;
2015-06-24 19:07:02 +03:00
int ret = 0 ;
2005-04-17 02:20:36 +04:00
2010-06-04 12:56:29 +04:00
if ( ! dquot_active ( inode ) )
2015-06-24 19:07:02 +03:00
return 0 ;
2009-01-12 19:23:05 +03:00
2015-02-12 12:36:37 +03:00
dquots = i_dquot ( inode ) ;
2009-01-12 19:23:05 +03:00
/* First get references to structures we might need. */
for ( cnt = 0 ; cnt < MAXQUOTAS ; cnt + + ) {
2012-09-16 14:11:50 +04:00
struct kqid qid ;
2015-03-18 22:04:53 +03:00
kprojid_t projid ;
int rc ;
2015-06-24 19:07:02 +03:00
struct dquot * dquot ;
2015-03-18 22:04:53 +03:00
2009-01-12 19:23:05 +03:00
if ( type ! = - 1 & & cnt ! = type )
continue ;
2014-06-04 08:20:30 +04:00
/*
* The i_dquot should have been initialized in most cases ,
* we check it without locking here to avoid unnecessary
* dqget ( ) / dqput ( ) calls .
*/
2015-02-12 12:36:37 +03:00
if ( dquots [ cnt ] )
2014-06-04 08:20:30 +04:00
continue ;
2015-03-18 22:04:53 +03:00
if ( ! sb_has_quota_active ( sb , cnt ) )
continue ;
2014-06-04 08:20:30 +04:00
init_needed = 1 ;
2009-01-12 19:23:05 +03:00
switch ( cnt ) {
case USRQUOTA :
2012-09-16 14:11:50 +04:00
qid = make_kqid_uid ( inode - > i_uid ) ;
2009-01-12 19:23:05 +03:00
break ;
case GRPQUOTA :
2012-09-16 14:11:50 +04:00
qid = make_kqid_gid ( inode - > i_gid ) ;
2009-01-12 19:23:05 +03:00
break ;
2015-03-18 22:04:53 +03:00
case PRJQUOTA :
rc = inode - > i_sb - > dq_op - > get_projid ( inode , & projid ) ;
if ( rc )
continue ;
qid = make_kqid_projid ( projid ) ;
break ;
2009-01-12 19:23:05 +03:00
}
2015-06-24 19:07:02 +03:00
dquot = dqget ( sb , qid ) ;
if ( IS_ERR ( dquot ) ) {
/* We raced with somebody turning quotas off... */
if ( PTR_ERR ( dquot ) ! = - ESRCH ) {
ret = PTR_ERR ( dquot ) ;
goto out_put ;
}
dquot = NULL ;
}
got [ cnt ] = dquot ;
2009-01-12 19:23:05 +03:00
}
2014-06-04 08:20:30 +04:00
/* All required i_dquot has been initialized */
if ( ! init_needed )
2015-06-24 19:07:02 +03:00
return 0 ;
2014-06-04 08:20:30 +04:00
2014-06-04 08:23:19 +04:00
spin_lock ( & dq_data_lock ) ;
2005-04-17 02:20:36 +04:00
if ( IS_NOQUOTA ( inode ) )
2015-06-24 19:07:02 +03:00
goto out_lock ;
2005-04-17 02:20:36 +04:00
for ( cnt = 0 ; cnt < MAXQUOTAS ; cnt + + ) {
if ( type ! = - 1 & & cnt ! = type )
continue ;
2009-01-12 19:23:05 +03:00
/* Avoid races with quotaoff() */
if ( ! sb_has_quota_active ( sb , cnt ) )
continue ;
2010-10-19 02:24:21 +04:00
/* We could race with quotaon or dqget() could have failed */
if ( ! got [ cnt ] )
continue ;
2015-02-12 12:36:37 +03:00
if ( ! dquots [ cnt ] ) {
dquots [ cnt ] = got [ cnt ] ;
2009-01-26 18:01:43 +03:00
got [ cnt ] = NULL ;
2010-02-09 20:20:39 +03:00
/*
* Make quota reservation system happy if someone
* did a write before quota was turned on
*/
rsv = inode_get_rsv_space ( inode ) ;
2014-06-04 08:23:19 +04:00
if ( unlikely ( rsv ) )
2015-02-12 12:36:37 +03:00
dquot_resv_space ( dquots [ cnt ] , rsv ) ;
2005-04-17 02:20:36 +04:00
}
}
2015-06-24 19:07:02 +03:00
out_lock :
2014-06-04 08:23:19 +04:00
spin_unlock ( & dq_data_lock ) ;
2015-06-24 19:07:02 +03:00
out_put :
2009-01-12 19:23:05 +03:00
/* Drop unused references */
2009-12-14 15:21:15 +03:00
dqput_all ( got ) ;
2015-06-24 19:07:02 +03:00
return ret ;
2010-03-03 17:05:07 +03:00
}
2015-06-24 19:07:02 +03:00
/* Initialize dquot pointers for all quota types of @inode. */
int dquot_initialize(struct inode *inode)
{
	return __dquot_initialize(inode, -1);
}
EXPORT_SYMBOL(dquot_initialize);
2005-04-17 02:20:36 +04:00
2017-05-25 01:24:07 +03:00
bool dquot_initialize_needed ( struct inode * inode )
{
struct dquot * * dquots ;
int i ;
if ( ! dquot_active ( inode ) )
return false ;
dquots = i_dquot ( inode ) ;
for ( i = 0 ; i < MAXQUOTAS ; i + + )
if ( ! dquots [ i ] & & sb_has_quota_active ( inode - > i_sb , i ) )
return true ;
return false ;
}
EXPORT_SYMBOL ( dquot_initialize_needed ) ;
2005-04-17 02:20:36 +04:00
/*
2014-06-04 08:23:19 +04:00
* Release all quotas referenced by inode .
*
* This function only be called on inode free or converting
* a file to quota file , no other users for the i_dquot in
* both cases , so we needn ' t call synchronize_srcu ( ) after
* clearing i_dquot .
2005-04-17 02:20:36 +04:00
*/
2010-03-03 17:05:05 +03:00
static void __dquot_drop ( struct inode * inode )
2005-04-17 02:20:36 +04:00
{
int cnt ;
2015-02-12 12:36:37 +03:00
struct dquot * * dquots = i_dquot ( inode ) ;
2009-01-12 19:23:05 +03:00
struct dquot * put [ MAXQUOTAS ] ;
2005-04-17 02:20:36 +04:00
2014-06-04 08:23:19 +04:00
spin_lock ( & dq_data_lock ) ;
2005-04-17 02:20:36 +04:00
for ( cnt = 0 ; cnt < MAXQUOTAS ; cnt + + ) {
2015-02-12 12:36:37 +03:00
put [ cnt ] = dquots [ cnt ] ;
dquots [ cnt ] = NULL ;
2005-04-17 02:20:36 +04:00
}
2014-06-04 08:23:19 +04:00
spin_unlock ( & dq_data_lock ) ;
2009-12-14 15:21:15 +03:00
dqput_all ( put ) ;
2005-04-17 02:20:36 +04:00
}
2010-03-03 17:05:05 +03:00
void dquot_drop ( struct inode * inode )
{
2015-02-12 12:36:37 +03:00
struct dquot * const * dquots ;
2010-03-03 17:05:05 +03:00
int cnt ;
if ( IS_NOQUOTA ( inode ) )
return ;
/*
* Test before calling to rule out calls from proc and such
* where we are not allowed to block . Note that this is
* actually reliable test even without the lock - the caller
* must assure that nobody can come after the DQUOT_DROP and
* add quota pointers back anyway .
*/
2015-02-12 12:36:37 +03:00
dquots = i_dquot ( inode ) ;
2010-03-03 17:05:05 +03:00
for ( cnt = 0 ; cnt < MAXQUOTAS ; cnt + + ) {
2015-02-12 12:36:37 +03:00
if ( dquots [ cnt ] )
2010-03-03 17:05:05 +03:00
break ;
}
if ( cnt < MAXQUOTAS )
__dquot_drop ( inode ) ;
}
EXPORT_SYMBOL ( dquot_drop ) ;
2008-07-25 12:46:50 +04:00
2009-12-14 15:21:13 +03:00
/*
* inode_reserved_space is managed internally by quota , and protected by
* i_lock similar to i_blocks + i_bytes .
*/
static qsize_t * inode_reserved_space ( struct inode * inode )
{
/* Filesystem must explicitly define it's own method in order to use
* quota reservation interface */
BUG_ON ( ! inode - > i_sb - > dq_op - > get_reserved_space ) ;
return inode - > i_sb - > dq_op - > get_reserved_space ( inode ) ;
}
static qsize_t inode_get_rsv_space ( struct inode * inode )
{
qsize_t ret ;
2010-01-06 20:03:36 +03:00
if ( ! inode - > i_sb - > dq_op - > get_reserved_space )
return 0 ;
2009-12-14 15:21:13 +03:00
spin_lock ( & inode - > i_lock ) ;
ret = * inode_reserved_space ( inode ) ;
spin_unlock ( & inode - > i_lock ) ;
return ret ;
}
2005-04-17 02:20:36 +04:00
/*
 * This function updates i_blocks+i_bytes fields and quota information
 * (together with appropriate checks).
 *
 * NOTE: We absolutely rely on the fact that caller dirties the inode
 * (usually helpers in quotaops.h care about this) and holds a handle for
 * the current transaction so that dquot write and inode write go into the
 * same transaction.
 *
 * Flags: DQUOT_SPACE_RESERVE accounts into the in-memory reservation
 * instead of used space; DQUOT_SPACE_WARN enables quota warnings;
 * DQUOT_SPACE_NOFAIL forces the allocation even over the limits.
 *
 * This operation can block, but only after everything is updated.
 */
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	int reserve = flags & DQUOT_SPACE_RESERVE;
	struct dquot **dquots;

	if (!dquot_active(inode)) {
		/* No quota accounting - just update the inode byte counters */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) += number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_add_bytes(inode, number);
		}
		goto out;
	}

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* First pass: check all limits so the update below cannot fail */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_bdq(dquots[cnt], number,
				!(flags & DQUOT_SPACE_WARN), &warn[cnt]);
		if (ret && !(flags & DQUOT_SPACE_NOFAIL)) {
			spin_unlock(&dq_data_lock);
			goto out_flush_warn;
		}
	}
	/* Second pass: actually charge the space to every active quota */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		if (reserve)
			dquot_resv_space(dquots[cnt], number);
		else
			dquot_incr_space(dquots[cnt], number);
	}
	/* Inode byte counters are updated while still holding dq_data_lock
	 * so that they stay consistent with the dquot usage above */
	if (reserve) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		spin_unlock(&inode->i_lock);
	} else {
		inode_add_bytes(inode, number);
	}
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_flush_warn;	/* Reservations are not written to disk */
	mark_all_dquot_dirty(dquots);
out_flush_warn:
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings are sent only after all locks are dropped */
	flush_warnings(warn);
out:
	return ret;
}
EXPORT_SYMBOL(__dquot_alloc_space);
2005-04-17 02:20:36 +04:00
/*
 * Charge one inode to all of the inode's active quotas (with limit checks).
 * Returns 0 on success or the error from check_idq() when over quota.
 *
 * This operation can block, but only after everything is updated.
 */
int dquot_alloc_inode(struct inode *inode)
{
	int cnt, ret = 0, index;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;

	if (!dquot_active(inode))
		return 0;
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		warn[cnt].w_type = QUOTA_NL_NOWARN;

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* First pass: check all limits so the increment loop cannot fail */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		ret = check_idq(dquots[cnt], 1, &warn[cnt]);
		if (ret)
			goto warn_put_all;
	}
	/* Second pass: actually account the inode */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!dquots[cnt])
			continue;
		dquot_incr_inodes(dquots[cnt], 1);
	}

warn_put_all:
	spin_unlock(&dq_data_lock);
	if (ret == 0)
		mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings are sent only after all locks are dropped */
	flush_warnings(warn);
	return ret;
}
EXPORT_SYMBOL(dquot_alloc_inode);
2005-04-17 02:20:36 +04:00
2010-03-03 17:05:00 +03:00
/*
 * Convert in-memory reserved quotas to real consumed quotas.
 *
 * Moves @number bytes from the inode's reserved-space counter into
 * i_blocks+i_bytes and from dquot reservations into dquot usage.
 * No limit checks are needed - the space was already reserved.
 * Always returns 0.
 */
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		/* No quota accounting - just move inode bytes */
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		__inode_add_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return 0;
	}

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* Claim reserved quotas to allocated quotas */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			dquot_claim_reserved_space(dquots[cnt], number);
	}
	/* Update inode bytes under i_lock, nested inside dq_data_lock */
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) -= number;
	__inode_add_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return 0;
}
EXPORT_SYMBOL(dquot_claim_space_nodirty);
2009-01-13 18:43:14 +03:00
2013-08-17 17:32:32 +04:00
/*
 * Convert allocated space back to in-memory reserved quotas.
 *
 * The inverse of dquot_claim_space_nodirty(): moves @number bytes from
 * i_blocks+i_bytes back into the inode's reserved-space counter and from
 * dquot usage back into dquot reservations.
 */
void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
{
	struct dquot **dquots;
	int cnt, index;

	if (!dquot_active(inode)) {
		/* No quota accounting - just move inode bytes */
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) += number;
		__inode_sub_bytes(inode, number);
		spin_unlock(&inode->i_lock);
		return;
	}

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	/* Return allocated quota back to reservations */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (dquots[cnt])
			dquot_reclaim_reserved_space(dquots[cnt], number);
	}
	/* Update inode bytes under i_lock, nested inside dq_data_lock */
	spin_lock(&inode->i_lock);
	*inode_reserved_space(inode) += number;
	__inode_sub_bytes(inode, number);
	spin_unlock(&inode->i_lock);
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	return;
}
EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
2005-04-17 02:20:36 +04:00
/*
 * Release @number bytes from the inode's quotas and i_blocks+i_bytes
 * (or from the in-memory reservation when DQUOT_SPACE_RESERVE is set).
 *
 * This operation can block, but only after everything is updated.
 */
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot **dquots;
	int reserve = flags & DQUOT_SPACE_RESERVE, index;

	if (!dquot_active(inode)) {
		/* No quota accounting - just update the inode byte counters */
		if (reserve) {
			spin_lock(&inode->i_lock);
			*inode_reserved_space(inode) -= number;
			spin_unlock(&inode->i_lock);
		} else {
			inode_sub_bytes(inode, number);
		}
		return;
	}

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		/* Prepare a "back under limit" warning if this free crosses one */
		wtype = info_bdq_free(dquots[cnt], number);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		if (reserve)
			dquot_free_reserved_space(dquots[cnt], number);
		else
			dquot_decr_space(dquots[cnt], number);
	}
	/* Inode byte counters are updated while still holding dq_data_lock
	 * so that they stay consistent with the dquot usage above */
	if (reserve) {
		spin_lock(&inode->i_lock);
		*inode_reserved_space(inode) -= number;
		spin_unlock(&inode->i_lock);
	} else {
		inode_sub_bytes(inode, number);
	}
	spin_unlock(&dq_data_lock);

	if (reserve)
		goto out_unlock;	/* Reservations are not written to disk */
	mark_all_dquot_dirty(dquots);
out_unlock:
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings are sent only after all locks are dropped */
	flush_warnings(warn);
}
EXPORT_SYMBOL(__dquot_free_space);
2009-12-14 15:21:13 +03:00
2005-04-17 02:20:36 +04:00
/*
 * Release one inode from all of the inode's active quotas.
 *
 * This operation can block, but only after everything is updated.
 */
void dquot_free_inode(struct inode *inode)
{
	unsigned int cnt;
	struct dquot_warn warn[MAXQUOTAS];
	struct dquot * const *dquots;
	int index;

	if (!dquot_active(inode))
		return;

	dquots = i_dquot(inode);
	/* SRCU protects the dquot pointers against quotaoff */
	index = srcu_read_lock(&dquot_srcu);
	spin_lock(&dq_data_lock);
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		int wtype;

		warn[cnt].w_type = QUOTA_NL_NOWARN;
		if (!dquots[cnt])
			continue;
		/* Prepare a "back under limit" warning if this free crosses one */
		wtype = info_idq_free(dquots[cnt], 1);
		if (wtype != QUOTA_NL_NOWARN)
			prepare_warning(&warn[cnt], dquots[cnt], wtype);
		dquot_decr_inodes(dquots[cnt], 1);
	}
	spin_unlock(&dq_data_lock);
	mark_all_dquot_dirty(dquots);
	srcu_read_unlock(&dquot_srcu, index);
	/* Warnings are sent only after all locks are dropped */
	flush_warnings(warn);
}
EXPORT_SYMBOL(dquot_free_inode);
2005-04-17 02:20:36 +04:00
/*
 * Transfer the number of inodes and blocks from one diskquota to another.
 * On success, dquot references in transfer_to are consumed and references
 * to original dquots that need to be released are placed there. On failure,
 * references are kept untouched.
 *
 * This operation can block, but only after everything is updated.
 * A transaction must be started when entering this function.
 *
 * We are holding reference on transfer_from & transfer_to, no need to
 * protect them by srcu_read_lock().
 */
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
	qsize_t space, cur_space;
	qsize_t rsv_space = 0;
	qsize_t inode_usage = 1;
	struct dquot *transfer_from[MAXQUOTAS] = {};
	int cnt, ret = 0;
	char is_valid[MAXQUOTAS] = {};
	struct dquot_warn warn_to[MAXQUOTAS];
	struct dquot_warn warn_from_inodes[MAXQUOTAS];
	struct dquot_warn warn_from_space[MAXQUOTAS];

	if (IS_NOQUOTA(inode))
		return 0;

	/* Filesystems may account more than one "inode" per inode
	 * (e.g. internal metadata inodes); ask them how many */
	if (inode->i_sb->dq_op->get_inode_usage) {
		ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
		if (ret)
			return ret;
	}

	/* Initialize the arrays */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		warn_to[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
		warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
	}

	spin_lock(&dq_data_lock);
	/* Re-check under the lock - quota may have been turned off meanwhile */
	if (IS_NOQUOTA(inode)) {	/* File without quota accounting? */
		spin_unlock(&dq_data_lock);
		return 0;
	}
	cur_space = inode_get_bytes(inode);
	rsv_space = inode_get_rsv_space(inode);
	space = cur_space + rsv_space;
	/* Build the transfer_from list and check the limits */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		/*
		 * Skip changes for same uid or gid or for turned off quota-type.
		 */
		if (!transfer_to[cnt])
			continue;
		/* Avoid races with quotaoff() */
		if (!sb_has_quota_active(inode->i_sb, cnt))
			continue;
		is_valid[cnt] = 1;
		transfer_from[cnt] = i_dquot(inode)[cnt];
		ret = check_idq(transfer_to[cnt], inode_usage, &warn_to[cnt]);
		if (ret)
			goto over_quota;
		ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
		if (ret)
			goto over_quota;
	}
	/*
	 * Finally perform the needed transfer from transfer_from to transfer_to
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (!is_valid[cnt])
			continue;
		/* Due to IO error we might not have transfer_from[] structure */
		if (transfer_from[cnt]) {
			int wtype;

			wtype = info_idq_free(transfer_from[cnt], inode_usage);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_inodes[cnt],
						transfer_from[cnt], wtype);
			wtype = info_bdq_free(transfer_from[cnt], space);
			if (wtype != QUOTA_NL_NOWARN)
				prepare_warning(&warn_from_space[cnt],
						transfer_from[cnt], wtype);
			dquot_decr_inodes(transfer_from[cnt], inode_usage);
			dquot_decr_space(transfer_from[cnt], cur_space);
			dquot_free_reserved_space(transfer_from[cnt],
						  rsv_space);
		}
		dquot_incr_inodes(transfer_to[cnt], inode_usage);
		dquot_incr_space(transfer_to[cnt], cur_space);
		dquot_resv_space(transfer_to[cnt], rsv_space);

		i_dquot(inode)[cnt] = transfer_to[cnt];
	}
	spin_unlock(&dq_data_lock);

	mark_all_dquot_dirty(transfer_from);
	mark_all_dquot_dirty(transfer_to);
	flush_warnings(warn_to);
	flush_warnings(warn_from_inodes);
	flush_warnings(warn_from_space);
	/* Pass back references to put */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (is_valid[cnt])
			transfer_to[cnt] = transfer_from[cnt];
	return 0;
over_quota:
	spin_unlock(&dq_data_lock);
	flush_warnings(warn_to);
	return ret;
}
EXPORT_SYMBOL(__dquot_transfer);
2005-04-17 02:20:36 +04:00
2010-02-16 08:31:50 +03:00
/* Wrapper for transferring ownership of an inode for uid/gid only
 * Called from FSXXX_setattr()
 *
 * Looks up the destination dquots for the new uid/gid from @iattr and
 * hands them to __dquot_transfer(). A dqget() result of -ESRCH (quota
 * not active for that type) is treated as "no transfer for this type",
 * not as an error. All acquired references are dropped via dqput_all()
 * on every exit path.
 */
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct dquot *dquot;
	struct super_block *sb = inode->i_sb;
	int ret;

	if (!dquot_active(inode))
		return 0;

	if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)) {
		dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[USRQUOTA] = dquot;
	}
	if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)) {
		dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
		if (IS_ERR(dquot)) {
			if (PTR_ERR(dquot) != -ESRCH) {
				ret = PTR_ERR(dquot);
				goto out_put;
			}
			dquot = NULL;
		}
		transfer_to[GRPQUOTA] = dquot;
	}
	ret = __dquot_transfer(inode, transfer_to);
out_put:
	/* On success transfer_to[] holds the old references to put */
	dqput_all(transfer_to);
	return ret;
}
EXPORT_SYMBOL(dquot_transfer);
2008-07-25 12:46:50 +04:00
2005-04-17 02:20:36 +04:00
/*
* Write info of quota file to disk
*/
int dquot_commit_info ( struct super_block * sb , int type )
{
struct quota_info * dqopt = sb_dqopt ( sb ) ;
2017-06-09 09:45:43 +03:00
return dqopt - > ops [ type ] - > write_file_info ( sb , type ) ;
2005-04-17 02:20:36 +04:00
}
2009-01-14 18:19:32 +03:00
EXPORT_SYMBOL ( dquot_commit_info ) ;
2005-04-17 02:20:36 +04:00
2016-01-25 21:24:50 +03:00
int dquot_get_next_id ( struct super_block * sb , struct kqid * qid )
{
struct quota_info * dqopt = sb_dqopt ( sb ) ;
2016-11-23 15:35:14 +03:00
if ( ! sb_has_quota_active ( sb , qid - > type ) )
return - ESRCH ;
if ( ! dqopt - > ops [ qid - > type ] - > get_next_id )
return - ENOSYS ;
2017-06-09 09:36:16 +03:00
return dqopt - > ops [ qid - > type ] - > get_next_id ( sb , qid ) ;
2016-01-25 21:24:50 +03:00
}
EXPORT_SYMBOL ( dquot_get_next_id ) ;
2005-04-17 02:20:36 +04:00
/*
 * Definitions of diskquota operations.
 *
 * Default dquot_operations for filesystems that use the generic quota
 * accounting implemented in this file.
 */
const struct dquot_operations dquot_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
	.get_next_id	= dquot_get_next_id,
};
EXPORT_SYMBOL(dquot_operations);
2005-04-17 02:20:36 +04:00
2010-03-03 17:05:06 +03:00
/*
* Generic helper for - > open on filesystems supporting disk quotas .
*/
int dquot_file_open ( struct inode * inode , struct file * file )
{
int error ;
error = generic_file_open ( inode , file ) ;
if ( ! error & & ( file - > f_mode & FMODE_WRITE ) )
2010-03-03 17:05:07 +03:00
dquot_initialize ( inode ) ;
2010-03-03 17:05:06 +03:00
return error ;
}
EXPORT_SYMBOL ( dquot_file_open ) ;
2005-04-17 02:20:36 +04:00
/*
 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
 *
 * @flags selects what to disable: DQUOT_USAGE_ENABLED / DQUOT_LIMITS_ENABLED
 * turn accounting/limits off, DQUOT_SUSPENDED suspends quotas (e.g. for
 * remount read-only) while keeping the quota inode so they can be resumed.
 */
int dquot_disable(struct super_block *sb, int type, unsigned int flags)
{
	int cnt, ret = 0;
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *toputinode[MAXQUOTAS];

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	/* Cannot turn off usage accounting without turning off limits, or
	 * suspend quotas and simultaneously turn quotas off. */
	if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
	    || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
	    DQUOT_USAGE_ENABLED)))
		return -EINVAL;

	/*
	 * Skip everything if there's nothing to do. We have to do this because
	 * sometimes we are called when fill_super() failed and calling
	 * sync_fs() in such cases does no good.
	 */
	if (!sb_any_quota_loaded(sb))
		return 0;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		toputinode[cnt] = NULL;
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_loaded(sb, cnt))
			continue;
		if (flags & DQUOT_SUSPENDED) {
			spin_lock(&dq_state_lock);
			dqopt->flags |=
				dquot_state_flag(DQUOT_SUSPENDED, cnt);
			spin_unlock(&dq_state_lock);
		} else {
			spin_lock(&dq_state_lock);
			dqopt->flags &= ~dquot_state_flag(flags, cnt);
			/* Turning off suspended quotas? */
			if (!sb_has_quota_loaded(sb, cnt) &&
			    sb_has_quota_suspended(sb, cnt)) {
				dqopt->flags &= ~dquot_state_flag(
							DQUOT_SUSPENDED, cnt);
				spin_unlock(&dq_state_lock);
				/* Drop the inode kept for a later resume */
				iput(dqopt->files[cnt]);
				dqopt->files[cnt] = NULL;
				continue;
			}
			spin_unlock(&dq_state_lock);
		}

		/* We still have to keep quota loaded? */
		if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
			continue;

		/* Note: these are blocking operations */
		drop_dquot_ref(sb, cnt);
		invalidate_dquots(sb, cnt);
		/*
		 * Now all dquots should be invalidated, all writes done so we
		 * should be only users of the info. No locks needed.
		 */
		if (info_dirty(&dqopt->info[cnt]))
			sb->dq_op->write_info(sb, cnt);
		if (dqopt->ops[cnt]->free_file_info)
			dqopt->ops[cnt]->free_file_info(sb, cnt);
		put_quota_format(dqopt->info[cnt].dqi_format);
		toputinode[cnt] = dqopt->files[cnt];
		/* When suspending, files[cnt] is kept for dquot_resume() */
		if (!sb_has_quota_loaded(sb, cnt))
			dqopt->files[cnt] = NULL;
		dqopt->info[cnt].dqi_flags = 0;
		dqopt->info[cnt].dqi_igrace = 0;
		dqopt->info[cnt].dqi_bgrace = 0;
		dqopt->ops[cnt] = NULL;
	}

	/* Skip syncing and setting flags if quota files are hidden */
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
		goto put_inodes;

	/* Sync the superblock so that buffers with quota data are written to
	 * disk (and so userspace sees correct data afterwards). */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
	sync_blockdev(sb->s_bdev);
	/* Now the quota files are just ordinary files and we can set the
	 * inode flags back. Moreover we discard the pagecache so that
	 * userspace sees the writes we did bypassing the pagecache. We
	 * must also discard the blockdev buffers so that we see the
	 * changes done by userspace on the next quotaon() */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		/* This can happen when suspending quotas on remount-ro... */
		if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
			inode_lock(toputinode[cnt]);
			toputinode[cnt]->i_flags &= ~S_NOQUOTA;
			truncate_inode_pages(&toputinode[cnt]->i_data, 0);
			inode_unlock(toputinode[cnt]);
			mark_inode_dirty_sync(toputinode[cnt]);
		}
	if (sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
put_inodes:
	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		if (toputinode[cnt]) {
			/* On remount RO, we keep the inode pointer so that we
			 * can reenable quota on the subsequent remount RW. We
			 * have to check 'flags' variable and not use sb_has_
			 * function because another quotaon / quotaoff could
			 * change global state before we got here. We refuse
			 * to suspend quotas when there is pending delete on
			 * the quota file... */
			if (!(flags & DQUOT_SUSPENDED))
				iput(toputinode[cnt]);
			else if (!toputinode[cnt]->i_nlink)
				ret = -EBUSY;
		}
	return ret;
}
EXPORT_SYMBOL(dquot_disable);
2005-04-17 02:20:36 +04:00
2010-05-19 15:16:45 +04:00
int dquot_quota_off ( struct super_block * sb , int type )
2008-08-20 19:50:32 +04:00
{
2010-05-19 15:16:41 +04:00
return dquot_disable ( sb , type ,
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED ) ;
2008-08-20 19:50:32 +04:00
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_quota_off ) ;
2010-05-19 15:16:41 +04:00
2005-04-17 02:20:36 +04:00
/*
* Turn quotas on on a device
*/
2008-08-20 19:50:32 +04:00
/*
* Helper function to turn quotas on when we already have the inode of
* quota file and no quota information is loaded .
*/
static int vfs_load_quota_inode ( struct inode * inode , int type , int format_id ,
unsigned int flags )
2005-04-17 02:20:36 +04:00
{
struct quota_format_type * fmt = find_quota_format ( format_id ) ;
struct super_block * sb = inode - > i_sb ;
struct quota_info * dqopt = sb_dqopt ( sb ) ;
int error ;
if ( ! fmt )
return - ESRCH ;
if ( ! S_ISREG ( inode - > i_mode ) ) {
error = - EACCES ;
goto out_fmt ;
}
if ( IS_RDONLY ( inode ) ) {
error = - EROFS ;
goto out_fmt ;
}
2015-03-18 22:04:53 +03:00
if ( ! sb - > s_op - > quota_write | | ! sb - > s_op - > quota_read | |
( type = = PRJQUOTA & & sb - > dq_op - > get_projid = = NULL ) ) {
2005-04-17 02:20:36 +04:00
error = - EINVAL ;
goto out_fmt ;
}
2016-07-05 18:41:57 +03:00
/* Filesystems outside of init_user_ns not yet supported */
if ( sb - > s_user_ns ! = & init_user_ns ) {
error = - EINVAL ;
goto out_fmt ;
}
2008-08-20 19:50:32 +04:00
/* Usage always has to be set... */
if ( ! ( flags & DQUOT_USAGE_ENABLED ) ) {
error = - EINVAL ;
goto out_fmt ;
}
2016-11-23 16:04:55 +03:00
if ( sb_has_quota_loaded ( sb , type ) ) {
error = - EBUSY ;
goto out_fmt ;
}
2005-04-17 02:20:36 +04:00
2008-09-30 19:53:37 +04:00
if ( ! ( dqopt - > flags & DQUOT_QUOTA_SYS_FILE ) ) {
2010-02-22 23:07:17 +03:00
/* As we bypass the pagecache we must now flush all the
* dirty data and invalidate caches so that kernel sees
* changes from userspace . It is not enough to just flush
* the quota file since if blocksize < pagesize , invalidation
* of the cache could fail because of other unrelated dirty
* data */
sync_filesystem ( sb ) ;
2008-09-30 19:53:37 +04:00
invalidate_bdev ( sb - > s_bdev ) ;
}
if ( ! ( dqopt - > flags & DQUOT_QUOTA_SYS_FILE ) ) {
/* We don't want quota and atime on quota files (deadlocks
* possible ) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit . */
2016-01-22 23:40:57 +03:00
inode_lock ( inode ) ;
2017-04-11 17:29:12 +03:00
inode - > i_flags | = S_NOQUOTA ;
2016-01-22 23:40:57 +03:00
inode_unlock ( inode ) ;
2010-01-06 19:20:35 +03:00
/*
* When S_NOQUOTA is set , remove dquot references as no more
* references can be added
*/
2010-03-03 17:05:05 +03:00
__dquot_drop ( inode ) ;
2008-09-30 19:53:37 +04:00
}
2005-04-17 02:20:36 +04:00
error = - EIO ;
dqopt - > files [ type ] = igrab ( inode ) ;
if ( ! dqopt - > files [ type ] )
2016-11-23 16:04:55 +03:00
goto out_file_flags ;
2005-04-17 02:20:36 +04:00
error = - EINVAL ;
if ( ! fmt - > qf_ops - > check_quota_file ( sb , type ) )
goto out_file_init ;
dqopt - > ops [ type ] = fmt - > qf_ops ;
dqopt - > info [ type ] . dqi_format = fmt ;
2008-04-28 13:14:33 +04:00
dqopt - > info [ type ] . dqi_fmt_id = format_id ;
2005-04-17 02:20:36 +04:00
INIT_LIST_HEAD ( & dqopt - > info [ type ] . dqi_dirty_list ) ;
2009-01-27 17:47:22 +03:00
error = dqopt - > ops [ type ] - > read_file_info ( sb , type ) ;
2017-06-09 09:59:46 +03:00
if ( error < 0 )
2005-04-17 02:20:36 +04:00
goto out_file_init ;
2017-06-09 12:56:06 +03:00
if ( dqopt - > flags & DQUOT_QUOTA_SYS_FILE ) {
spin_lock ( & dq_data_lock ) ;
2011-11-16 18:03:59 +04:00
dqopt - > info [ type ] . dqi_flags | = DQF_SYS_FILE ;
2017-06-09 12:56:06 +03:00
spin_unlock ( & dq_data_lock ) ;
}
2009-01-12 19:23:05 +03:00
spin_lock ( & dq_state_lock ) ;
2008-08-20 19:50:32 +04:00
dqopt - > flags | = dquot_state_flag ( flags , type ) ;
2009-01-12 19:23:05 +03:00
spin_unlock ( & dq_state_lock ) ;
2005-04-17 02:20:36 +04:00
add_dquot_ref ( sb , type ) ;
return 0 ;
out_file_init :
dqopt - > files [ type ] = NULL ;
iput ( inode ) ;
2016-11-23 16:04:55 +03:00
out_file_flags :
2017-04-11 17:29:12 +03:00
inode_lock ( inode ) ;
inode - > i_flags & = ~ S_NOQUOTA ;
inode_unlock ( inode ) ;
2005-04-17 02:20:36 +04:00
out_fmt :
put_quota_format ( fmt ) ;
return error ;
}
2008-04-28 13:14:33 +04:00
/* Reenable quotas on remount RW
 *
 * For each type (or all types when type == -1) that was previously
 * suspended by dquot_disable(..., DQUOT_SUSPENDED), re-loads quota from
 * the kept quota inode with the usage/limits flags that were active at
 * suspend time. Returns the last vfs_load_quota_inode() error, if any.
 */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		/* Take over the inode reference kept by dquot_disable() */
		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		/* Remember which of usage/limits were enabled at suspend */
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
					   dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
2008-04-28 13:14:33 +04:00
2010-09-15 19:38:58 +04:00
int dquot_quota_on ( struct super_block * sb , int type , int format_id ,
2016-11-21 03:49:34 +03:00
const struct path * path )
2008-08-01 12:29:18 +04:00
{
int error = security_quota_on ( path - > dentry ) ;
if ( error )
return error ;
/* Quota file not on the same filesystem? */
2011-12-08 03:16:57 +04:00
if ( path - > dentry - > d_sb ! = sb )
2008-08-01 12:29:18 +04:00
error = - EXDEV ;
else
2015-03-18 01:26:15 +03:00
error = vfs_load_quota_inode ( d_inode ( path - > dentry ) , type ,
2008-08-20 19:50:32 +04:00
format_id , DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED ) ;
2008-08-01 12:29:18 +04:00
return error ;
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_quota_on ) ;
2005-04-17 02:20:36 +04:00
2008-08-20 19:50:32 +04:00
/*
* More powerful function for turning on quotas allowing setting
* of individual quota flags
*/
2010-05-19 15:16:45 +04:00
/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 *
 * @inode:     quota file inode
 * @type:      quota type
 * @format_id: on-disk quota format id (used only when quota is not loaded)
 * @flags:     DQUOT_USAGE_ENABLED and/or DQUOT_LIMITS_ENABLED to turn on
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	struct super_block *sb = inode->i_sb;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		/* Refuse to enable a mode that is already enabled */
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type))
			return -EBUSY;
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type))
			return -EBUSY;
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
		return 0;
	}

	/* Quota not loaded yet - do the full setup from the quota inode */
	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);
2008-08-20 19:50:32 +04:00
2005-04-17 02:20:36 +04:00
/*
* This function is used when filesystem needs to initialize quotas
* during mount time .
*/
2010-05-19 15:16:45 +04:00
int dquot_quota_on_mount ( struct super_block * sb , char * qf_name ,
2005-06-23 11:09:16 +04:00
int format_id , int type )
2005-04-17 02:20:36 +04:00
{
2005-06-23 11:09:16 +04:00
struct dentry * dentry ;
2005-04-17 02:20:36 +04:00
int error ;
2016-03-08 07:27:22 +03:00
dentry = lookup_one_len_unlocked ( qf_name , sb - > s_root , strlen ( qf_name ) ) ;
2005-06-23 11:09:16 +04:00
if ( IS_ERR ( dentry ) )
return PTR_ERR ( dentry ) ;
2015-03-18 01:26:15 +03:00
if ( d_really_is_negative ( dentry ) ) {
2005-11-29 00:44:14 +03:00
error = - ENOENT ;
goto out ;
}
2005-04-17 02:20:36 +04:00
error = security_quota_on ( dentry ) ;
2005-06-23 11:09:16 +04:00
if ( ! error )
2015-03-18 01:26:15 +03:00
error = vfs_load_quota_inode ( d_inode ( dentry ) , type , format_id ,
2008-08-20 19:50:32 +04:00
DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED ) ;
2005-06-23 11:09:16 +04:00
2005-11-29 00:44:14 +03:00
out :
2005-06-23 11:09:16 +04:00
dput ( dentry ) ;
return error ;
2005-04-17 02:20:36 +04:00
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_quota_on_mount ) ;
2008-07-25 12:46:50 +04:00
2014-10-06 20:40:51 +04:00
/*
 * Turn on limit enforcement for the quota types selected in @flags
 * (XFS-style quotactl interface).  Accounting must already be enabled;
 * it cannot be switched on for a mounted filesystem.
 */
static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Only filesystems keeping quota in hidden system files support this */
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type))
			return -EINVAL;
		ret = dquot_enable(dqopt->files[type], type,
				   dqopt->info[type].dqi_fmt_id,
				   DQUOT_LIMITS_ENABLED);
		if (ret < 0)
			goto out_err;
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}
/*
 * Turn off limit enforcement for the quota types selected in @flags
 * (XFS-style quotactl interface).  Turning off accounting is refused.
 */
static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	/* Only filesystems keeping quota in hidden system files support this */
	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * quota infrastructure can do this but filesystems don't expect
	 * userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_enable(dqopt->files[type], type,
				     dqopt->info[type].dqi_fmt_id,
				     DQUOT_LIMITS_ENABLED);
	}
	return ret;
}
2005-04-17 02:20:36 +04:00
/* Generic routine for getting common part of quota structure */
2014-10-09 18:03:13 +04:00
static void do_get_dqblk ( struct dquot * dquot , struct qc_dqblk * di )
2005-04-17 02:20:36 +04:00
{
struct mem_dqblk * dm = & dquot - > dq_dqb ;
2010-05-07 01:04:58 +04:00
memset ( di , 0 , sizeof ( * di ) ) ;
2005-04-17 02:20:36 +04:00
spin_lock ( & dq_data_lock ) ;
2014-10-09 18:03:13 +04:00
di - > d_spc_hardlimit = dm - > dqb_bhardlimit ;
di - > d_spc_softlimit = dm - > dqb_bsoftlimit ;
2010-05-07 01:04:58 +04:00
di - > d_ino_hardlimit = dm - > dqb_ihardlimit ;
di - > d_ino_softlimit = dm - > dqb_isoftlimit ;
2014-10-09 18:03:13 +04:00
di - > d_space = dm - > dqb_curspace + dm - > dqb_rsvspace ;
di - > d_ino_count = dm - > dqb_curinodes ;
di - > d_spc_timer = dm - > dqb_btime ;
di - > d_ino_timer = dm - > dqb_itime ;
2005-04-17 02:20:36 +04:00
spin_unlock ( & dq_data_lock ) ;
}
2012-09-16 13:07:49 +04:00
int dquot_get_dqblk ( struct super_block * sb , struct kqid qid ,
2014-10-09 18:03:13 +04:00
struct qc_dqblk * di )
2005-04-17 02:20:36 +04:00
{
struct dquot * dquot ;
2012-09-16 14:11:50 +04:00
dquot = dqget ( sb , qid ) ;
2015-06-24 19:07:02 +03:00
if ( IS_ERR ( dquot ) )
return PTR_ERR ( dquot ) ;
2005-04-17 02:20:36 +04:00
do_get_dqblk ( dquot , di ) ;
dqput ( dquot ) ;
2009-01-12 19:23:05 +03:00
2005-04-17 02:20:36 +04:00
return 0 ;
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_get_dqblk ) ;
2005-04-17 02:20:36 +04:00
2016-01-25 21:24:50 +03:00
/*
 * Find the next existing quota id at or after *qid (via the filesystem's
 * get_next_id hook), update *qid to it, and fill @di with that dquot's
 * usage and limits.
 */
int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	/* Not every filesystem can enumerate quota ids */
	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);
2014-10-09 18:03:13 +04:00
/* qc_dqblk fieldmask bits the generic VFS quota code knows how to set */
#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)
2005-04-17 02:20:36 +04:00
/* Generic routine for setting common part of quota structure */
2014-10-09 18:03:13 +04:00
/*
 * Generic routine for setting common part of quota structure: apply the
 * fields selected by di->d_fieldmask to the in-memory dquot, recompute
 * grace times and the DQ_FAKE_B state, and mark the dquot dirty.
 *
 * Returns 0, -EINVAL for unsupported fields, -ERANGE when a limit
 * exceeds what the quota format can store.
 */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	/* Reject fields the VFS quota code does not handle */
	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	/* New limits must fit within what the quota format can store */
	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dq_data_lock);
	if (di->d_fieldmask & QC_SPACE) {
		/* d_space includes reservations; curspace does not */
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	/* Space usage or limits changed: recompute grace time/warning state */
	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	/* Ditto for the inode soft limit */
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	/* A dquot with no limits at all is just a usage-tracking (fake) entry */
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dq_data_lock);
	mark_dquot_dirty(dquot);

	return 0;
}
2012-09-16 13:07:49 +04:00
int dquot_set_dqblk ( struct super_block * sb , struct kqid qid ,
2014-10-09 18:03:13 +04:00
struct qc_dqblk * di )
2005-04-17 02:20:36 +04:00
{
struct dquot * dquot ;
2008-04-28 13:14:31 +04:00
int rc ;
2005-04-17 02:20:36 +04:00
2012-09-16 14:11:50 +04:00
dquot = dqget ( sb , qid ) ;
2015-06-24 19:07:02 +03:00
if ( IS_ERR ( dquot ) ) {
rc = PTR_ERR ( dquot ) ;
2008-08-20 19:50:32 +04:00
goto out ;
2005-04-17 02:20:36 +04:00
}
2008-04-28 13:14:31 +04:00
rc = do_set_dqblk ( dquot , di ) ;
2005-04-17 02:20:36 +04:00
dqput ( dquot ) ;
2008-08-20 19:50:32 +04:00
out :
2008-04-28 13:14:31 +04:00
return rc ;
2005-04-17 02:20:36 +04:00
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_set_dqblk ) ;
2005-04-17 02:20:36 +04:00
/* Generic routine for getting common part of quota file information */
2014-11-19 02:42:09 +03:00
/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		/* dq_data_lock protects the mem_dqinfo fields read below */
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		/* Report the quota file's inode number and size in blocks */
		tstate->ino = dqopt->files[type]->i_ino;
		tstate->blocks = dqopt->files[type]->i_blocks;
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);
2005-04-17 02:20:36 +04:00
/* Generic routine for setting common part of quota file information */
2014-12-16 14:03:51 +03:00
int dquot_set_dqinfo ( struct super_block * sb , int type , struct qc_info * ii )
2005-04-17 02:20:36 +04:00
{
struct mem_dqinfo * mi ;
2008-08-20 19:50:32 +04:00
int err = 0 ;
2005-04-17 02:20:36 +04:00
2014-12-16 14:03:51 +03:00
if ( ( ii - > i_fieldmask & QC_WARNS_MASK ) | |
( ii - > i_fieldmask & QC_RT_SPC_TIMER ) )
return - EINVAL ;
2016-11-23 15:35:14 +03:00
if ( ! sb_has_quota_active ( sb , type ) )
return - ESRCH ;
2005-04-17 02:20:36 +04:00
mi = sb_dqopt ( sb ) - > info + type ;
2014-12-16 14:03:51 +03:00
if ( ii - > i_fieldmask & QC_FLAGS ) {
if ( ( ii - > i_flags & QCI_ROOT_SQUASH & &
2016-11-23 15:35:14 +03:00
mi - > dqi_format - > qf_fmt_id ! = QFMT_VFS_OLD ) )
return - EINVAL ;
2014-11-19 11:32:39 +03:00
}
2005-04-17 02:20:36 +04:00
spin_lock ( & dq_data_lock ) ;
2014-12-16 14:03:51 +03:00
if ( ii - > i_fieldmask & QC_SPC_TIMER )
mi - > dqi_bgrace = ii - > i_spc_timelimit ;
if ( ii - > i_fieldmask & QC_INO_TIMER )
mi - > dqi_igrace = ii - > i_ino_timelimit ;
if ( ii - > i_fieldmask & QC_FLAGS ) {
if ( ii - > i_flags & QCI_ROOT_SQUASH )
mi - > dqi_flags | = DQF_ROOT_SQUASH ;
else
mi - > dqi_flags & = ~ DQF_ROOT_SQUASH ;
}
2005-04-17 02:20:36 +04:00
spin_unlock ( & dq_data_lock ) ;
mark_info_dirty ( sb , type ) ;
/* Force write to disk */
sb - > dq_op - > write_info ( sb , type ) ;
2008-08-20 19:50:32 +04:00
return err ;
2005-04-17 02:20:36 +04:00
}
2010-05-19 15:16:45 +04:00
EXPORT_SYMBOL ( dquot_set_dqinfo ) ;
2005-04-17 02:20:36 +04:00
2014-10-06 20:40:51 +04:00
/*
 * quotactl operations for filesystems keeping quota in hidden system
 * files (enable/disable toggle enforcement only; accounting is fixed).
 */
const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
2010-04-26 20:03:33 +04:00
/*
 * /proc/sys/fs/quota/* handler: refresh the plain-int snapshot from the
 * per-cpu counter before letting proc_dointvec() report it.
 */
static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Recover the stat index from the table entry's data pointer */
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
2013-06-14 06:37:49 +04:00
/* sysctl entries under /proc/sys/fs/quota/ exposing dquot statistics */
static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	/* Writable knob: whether to print quota warnings to the console */
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },	/* sentinel */
};
2013-06-14 06:37:49 +04:00
/* Parent directory /proc/sys/fs/quota for the stats table above */
static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },	/* sentinel */
};
2013-06-14 06:37:49 +04:00
/* Root of this module's sysctl hierarchy: /proc/sys/fs */
static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },	/* sentinel */
};
/*
 * Module init: register sysctls, create the dquot slab cache and hash
 * table, set up per-cpu statistics counters, and register the dquot
 * cache shrinker.  Failures panic, since quota is core infrastructure.
 */
static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	/* One page worth of hash heads; GFP_ATOMIC matches historical usage */
	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_ATOMIC, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find power-of-two hlist_heads which can fit into allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	register_shrinker(&dqcache_shrinker);

	return 0;
}
fs_initcall(dquot_init);