/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale" controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with infinite node with infinite bandwidth) to twice the user's limit.  (In
 * practice, the maximum overrun you see should be much less.)  A "quota_scale"
 * number greater than one makes quota syncs more frequent and reduces the
 * maximum overrun.  Numbers less than one (but greater than zero) make quota
 * syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */
# include <linux/sched.h>
# include <linux/slab.h>
2011-05-25 04:12:27 +04:00
# include <linux/mm.h>
2006-01-16 19:50:04 +03:00
# include <linux/spinlock.h>
# include <linux/completion.h>
# include <linux/buffer_head.h>
# include <linux/sort.h>
2006-02-08 14:50:51 +03:00
# include <linux/fs.h>
2006-10-02 19:38:25 +04:00
# include <linux/bio.h>
2006-02-28 01:23:27 +03:00
# include <linux/gfs2_ondisk.h>
2008-11-17 17:25:37 +03:00
# include <linux/kthread.h>
# include <linux/freezer.h>
2009-09-28 15:49:15 +04:00
# include <linux/quota.h>
2009-09-11 18:57:27 +04:00
# include <linux/dqblk_xfs.h>
2013-11-01 22:52:06 +04:00
# include <linux/lockref.h>
2013-11-04 14:15:08 +04:00
# include <linux/list_lru.h>
2013-12-12 14:47:59 +04:00
# include <linux/rcupdate.h>
# include <linux/rculist_bl.h>
# include <linux/bit_spinlock.h>
# include <linux/jhash.h>
2006-01-16 19:50:04 +03:00
# include "gfs2.h"
2006-02-28 01:23:27 +03:00
# include "incore.h"
2006-01-16 19:50:04 +03:00
# include "bmap.h"
# include "glock.h"
# include "glops.h"
# include "log.h"
# include "meta_io.h"
# include "quota.h"
# include "rgrp.h"
# include "super.h"
# include "trans.h"
2006-02-08 14:50:51 +03:00
# include "inode.h"
2006-02-28 01:23:27 +03:00
# include "util.h"
2006-01-16 19:50:04 +03:00
2013-12-12 14:47:59 +04:00
# define GFS2_QD_HASH_SHIFT 12
# define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT)
# define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
2013-11-01 22:52:08 +04:00
static DEFINE_SPINLOCK ( qd_lock ) ;
2013-11-04 14:15:08 +04:00
struct list_lru gfs2_qd_lru ;
2009-01-08 01:03:37 +03:00
2013-12-12 14:47:59 +04:00
static struct hlist_bl_head qd_hash_table [ GFS2_QD_HASH_SIZE ] ;
/* Hash a (superblock, quota id) pair into a qd_hash_table bucket index. */
static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int hash = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);

	hash = jhash(&qid, sizeof(struct kqid), hash);
	return hash & GFS2_QD_HASH_MASK;
}
/* Take the bit-spinlock protecting one bucket of qd_hash_table. */
static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}
/* Release the bit-spinlock protecting one bucket of qd_hash_table. */
static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}
/* RCU callback: free a quota data object once no RCU readers can see it. */
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}
2013-11-04 14:15:08 +04:00
/*
 * Tear down every quota data object on @list (already isolated from the LRU
 * by gfs2_qd_isolate).  Each object is unlinked from its per-sb list and the
 * global hash, its glock reference dropped, and the memory freed via RCU.
 */
static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		/* Unhash under the bucket lock; readers use RCU */
		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		/* Object must be quiescent: no pending change, slot or bh */
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}
/*
 * LRU isolate callback: move an unreferenced quota data object onto the
 * caller's dispose list.
 *
 * Fix: the original returned LRU_REMOVED unconditionally once the trylock
 * succeeded, even when qd_lockref.count != 0 and the object was left on the
 * LRU.  That makes list_lru_walk_node() decrement its item count for an
 * object that was never removed, corrupting the LRU accounting.  Only report
 * LRU_REMOVED when the object was actually moved; otherwise LRU_SKIP.
 */
static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
	enum lru_status ret = LRU_SKIP;
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		/* Mark dead so concurrent lockref_get_not_dead() fails */
		lockref_mark_dead(&qd->qd_lockref);
		list_move(&qd->qd_lru, dispose);
		ret = LRU_REMOVED;
	}

	spin_unlock(&qd->qd_lockref.lock);
	return ret;
}
/*
 * Shrinker scan callback: isolate up to sc->nr_to_scan unreferenced quota
 * data objects from the LRU and dispose of them.  Refuses to run without
 * __GFP_FS since disposal may re-enter the filesystem.
 */
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	gfs2_qd_dispose(&dispose);

	return freed;
}
2009-01-08 01:03:37 +03:00
2013-11-04 14:15:08 +04:00
/* Shrinker count callback: report the per-node LRU population, scaled. */
static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}
2013-11-04 14:15:08 +04:00
/* Memory shrinker for the global quota data LRU; NUMA aware. */
struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};
2013-02-01 06:33:38 +04:00
static u64 qd2index ( struct gfs2_quota_data * qd )
{
2013-02-01 07:52:08 +04:00
struct kqid qid = qd - > qd_id ;
return ( 2 * ( u64 ) from_kqid ( & init_user_ns , qid ) ) +
2013-05-10 19:59:18 +04:00
( ( qid . type = = USRQUOTA ) ? 0 : 1 ) ;
2013-02-01 06:33:38 +04:00
}
2006-09-04 20:49:07 +04:00
static u64 qd2offset ( struct gfs2_quota_data * qd )
2006-01-16 19:50:04 +03:00
{
2006-09-04 20:49:07 +04:00
u64 offset ;
2006-01-16 19:50:04 +03:00
2013-02-01 06:33:38 +04:00
offset = qd2index ( qd ) ;
2006-01-16 19:50:04 +03:00
offset * = sizeof ( struct gfs2_quota ) ;
return offset ;
}
2013-12-12 14:47:59 +04:00
static struct gfs2_quota_data * qd_alloc ( unsigned hash , struct gfs2_sbd * sdp , struct kqid qid )
2006-01-16 19:50:04 +03:00
{
struct gfs2_quota_data * qd ;
int error ;
2008-11-17 17:25:37 +03:00
qd = kmem_cache_zalloc ( gfs2_quotad_cachep , GFP_NOFS ) ;
2006-01-16 19:50:04 +03:00
if ( ! qd )
2013-12-12 14:47:59 +04:00
return NULL ;
2006-01-16 19:50:04 +03:00
2013-12-12 14:47:59 +04:00
qd - > qd_sbd = sdp ;
2013-11-01 22:52:06 +04:00
qd - > qd_lockref . count = 1 ;
spin_lock_init ( & qd - > qd_lockref . lock ) ;
2013-02-01 07:52:08 +04:00
qd - > qd_id = qid ;
2006-01-16 19:50:04 +03:00
qd - > qd_slot = - 1 ;
2013-11-04 14:15:08 +04:00
INIT_LIST_HEAD ( & qd - > qd_lru ) ;
2013-12-12 14:47:59 +04:00
qd - > qd_hash = hash ;
2006-01-16 19:50:04 +03:00
2013-02-01 06:33:38 +04:00
error = gfs2_glock_get ( sdp , qd2index ( qd ) ,
2006-01-16 19:50:04 +03:00
& gfs2_quota_glops , CREATE , & qd - > qd_gl ) ;
if ( error )
goto fail ;
2013-12-12 14:47:59 +04:00
return qd ;
2006-01-16 19:50:04 +03:00
2006-09-04 20:04:26 +04:00
fail :
2008-11-17 17:25:37 +03:00
kmem_cache_free ( gfs2_quotad_cachep , qd ) ;
2013-12-12 14:47:59 +04:00
return NULL ;
2006-01-16 19:50:04 +03:00
}
2013-12-12 14:47:59 +04:00
static struct gfs2_quota_data * gfs2_qd_search_bucket ( unsigned int hash ,
const struct gfs2_sbd * sdp ,
struct kqid qid )
2006-01-16 19:50:04 +03:00
{
2013-12-12 14:47:59 +04:00
struct gfs2_quota_data * qd ;
struct hlist_bl_node * h ;
2006-01-16 19:50:04 +03:00
2013-12-12 14:47:59 +04:00
hlist_bl_for_each_entry_rcu ( qd , h , & qd_hash_table [ hash ] , qd_hlist ) {
if ( ! qid_eq ( qd - > qd_id , qid ) )
continue ;
if ( qd - > qd_sbd ! = sdp )
continue ;
if ( lockref_get_not_dead ( & qd - > qd_lockref ) ) {
list_lru_del ( & gfs2_qd_lru , & qd - > qd_lru ) ;
return qd ;
2006-01-16 19:50:04 +03:00
}
2013-12-12 14:47:59 +04:00
}
2006-01-16 19:50:04 +03:00
2013-12-12 14:47:59 +04:00
return NULL ;
}
2006-01-16 19:50:04 +03:00
2013-12-12 14:47:59 +04:00
/*
 * Find or create the quota data object for (@sdp, @qid), storing a
 * referenced pointer in *@qdp.  Uses the classic alloc-then-recheck pattern:
 * a lockless RCU lookup first, then an unlocked allocation, then a second
 * lookup under qd_lock/bucket lock to resolve the race with a concurrent
 * inserter.  Returns 0 or -ENOMEM.
 */
static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		/* We won the race: publish the new object */
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		/* Lost the race: discard the unused allocation */
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}
2013-12-12 14:47:59 +04:00
2006-01-16 19:50:04 +03:00
/* Take an additional reference on @qd; caller must already hold one. */
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}
/*
 * Drop a reference on @qd.  When the last reference goes away, the object
 * is parked on the global LRU for the shrinker to reclaim rather than freed
 * immediately.  lockref_put_or_lock() leaves us holding qd_lockref.lock on
 * the count==1 path.
 */
static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}
/*
 * Reserve a slot in the per-node quota-change file for @qd, scanning the
 * allocation bitmap for the first clear bit.  Counted: only the first
 * holder actually allocates.  Returns 0 on success or -ENOSPC when no slot
 * is free.  Runs under qd_lock, which protects both qd_slot_count and the
 * bitmap.
 */
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lock);

	if (qd->qd_slot_count++) {
		/* Slot already held; just bump the count */
		spin_unlock(&qd_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lock);
	return -ENOSPC;
}
/* Take an additional hold on @qd's already-allocated change-file slot. */
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lock);
}
2013-10-02 17:47:02 +04:00
/*
 * Set or clear bit @bit in the chunked @bitmap (an array of PAGE_SIZE byte
 * chunks), asserting that the bit actually changes value.
 */
static void gfs2_icbit_munge(struct gfs2_sbd *sdp, unsigned char **bitmap,
			     unsigned int bit, int new_value)
{
	unsigned int chunk = bit / (8 * PAGE_SIZE);
	unsigned int rem = bit % (8 * PAGE_SIZE);
	unsigned int byte = rem / 8;
	unsigned int mask = 1 << (rem % 8);
	int old_value = bitmap[chunk][byte] & mask;

	/* Withdraw if the caller asks for a no-op transition */
	gfs2_assert_withdraw(sdp, !old_value != !new_value);

	if (new_value)
		bitmap[chunk][byte] |= mask;
	else
		bitmap[chunk][byte] &= ~mask;
}
2006-01-16 19:50:04 +03:00
/*
 * Drop a hold on @qd's change-file slot; the last put clears the bitmap bit
 * and marks the object slotless.
 */
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lock);
}
/*
 * Read in and pin the quota-change file block containing @qd's slot, and
 * point qd->qd_bh_qc at this id's gfs2_quota_change record within it.
 * Counted: only the first holder does the read.  Serialized by
 * sd_quota_mutex.  Returns 0 or a -ve error (-EIO on metadata type
 * mismatch).
 */
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	/* Translate the slot number to a (block, record) position */
	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
/* Drop a hold on @qd's change-file buffer; last put releases the bh. */
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
2013-10-04 14:14:46 +04:00
/*
 * Decide whether @qd needs syncing to the quota file, and if so claim it:
 * take a reference, mark it QDF_LOCKED, snapshot the pending change into
 * qd_change_sync and pin its slot.  Skips entries already being synced,
 * with no pending change, or (when @sync_gen is given) already synced in
 * this generation.  Caller holds qd_lock.  Returns 1 if claimed, 0 if not.
 */
static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	/* Rotate to the tail so the scan stays roughly round-robin */
	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	return 1;
}
2006-01-16 19:50:04 +03:00
/*
 * Find one quota data object on @sdp's list that needs syncing, claim it
 * and pin its change-file buffer.  *@qdp is set to the claimed object or
 * NULL if nothing needs syncing (or the fs is read-only).  On bh_get()
 * failure the claim taken by qd_check_sync() is fully unwound.  Returns 0
 * or a -ve error.
 */
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			/* Undo everything qd_check_sync() claimed */
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
/* Release a quota data object claimed for syncing by qd_check_sync(). */
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
2013-02-01 07:35:56 +04:00
/*
 * Acquire a fully usable quota data object for @qid: a reference, a
 * change-file slot, and the pinned change-file buffer.  Unwinds partial
 * acquisitions on failure.  Returns 0 or a -ve error.
 */
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put ( struct gfs2_quota_data * qd )
{
bh_put ( qd ) ;
slot_put ( qd ) ;
qd_put ( qd ) ;
}
2013-02-01 08:27:54 +04:00
/*
 * gfs2_quota_hold - acquire the quota data objects an operation will charge
 * @ip: the inode being modified
 * @uid: a user id to charge in addition to the inode's owner, or
 *       NO_UID_QUOTA_CHANGE
 * @gid: a group id to charge in addition to the inode's group, or
 *       NO_GID_QUOTA_CHANGE
 *
 * Fills ip->i_res->rs_qa_qd[] with up to four held quota data objects:
 * the inode's owner/group plus the optional extra uid/gid (when they
 * differ from the inode's).  On any failure everything acquired so far is
 * released via gfs2_quota_unhold().  Returns 0 or a -ve error.
 */
int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	/* Must not already hold quota data or have quotas locked */
	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
/* Release every quota data object held in ip->i_res->rs_qa_qd[]. */
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}
static int sort_qd ( const void * a , const void * b )
{
2006-09-05 23:17:12 +04:00
const struct gfs2_quota_data * qd_a = * ( const struct gfs2_quota_data * * ) a ;
const struct gfs2_quota_data * qd_b = * ( const struct gfs2_quota_data * * ) b ;
2006-01-16 19:50:04 +03:00
2013-02-01 07:52:08 +04:00
if ( qid_lt ( qd_a - > qd_id , qd_b - > qd_id ) )
2006-09-05 23:17:12 +04:00
return - 1 ;
2013-02-01 07:52:08 +04:00
if ( qid_lt ( qd_b - > qd_id , qd_a - > qd_id ) )
2006-09-05 23:17:12 +04:00
return 1 ;
return 0 ;
2006-01-16 19:50:04 +03:00
}
2006-09-04 20:49:07 +04:00
/*
 * Apply @change blocks to @qd's entry in the per-node quota-change file,
 * inside the current transaction.  Initializes the on-disk record the first
 * time an id accrues a change, and tears down the in-core hold (slot +
 * reference) when the accumulated change returns to zero.  Serialized by
 * sd_quota_mutex.
 */
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* First change for this id: initialize the on-disk record */
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		/* Change cancelled out: release the QDF_CHANGE hold */
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* First nonzero change: pin the object and its slot */
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
2006-02-08 14:50:51 +03:00
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	/* Quota file data must live in its own blocks, not in the dinode */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	/* Read the current on-disk quota record for this entry */
	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		/* Apply any limit updates requested via the quotactl path,
		 * converting from fs blocks to basic (512-byte) blocks */
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Walk to the buffer head covering @offset within the page */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	/* Extend the quota file if this entry grew it */
	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
2006-01-16 19:50:04 +03:00
/**
 * do_sync - write a batch of locally cached quota changes to the quota file
 * @num_qd: number of entries in @qda
 * @qda: array of quota data items whose pending changes should be synced
 *
 * Takes the glock of every quota involved (exclusive, uncached) plus the
 * quota inode glock, reserves blocks for the worst case, then applies each
 * pending change to the on-disk quota file inside a single transaction.
 * On success each change is backed out of the per-node change file via
 * do_qc() and QDF_REFRESH is set so the LVB is refreshed on next lock.
 *
 * Returns: 0 on success or a negative errno
 */
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	/* Sort to establish a consistent glock acquisition order and
	   avoid inter-node deadlock when two nodes sync the same ids */
	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	/* Count how many of the writes need fresh block allocation */
	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		/* The change is now in the quota file; remove it from the
		   per-node change file and mark the LVB copy stale */
		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
2009-09-23 16:50:49 +04:00
static int update_qd ( struct gfs2_sbd * sdp , struct gfs2_quota_data * qd )
{
struct gfs2_inode * ip = GFS2_I ( sdp - > sd_quota_inode ) ;
struct gfs2_quota q ;
struct gfs2_quota_lvb * qlvb ;
loff_t pos ;
int error ;
memset ( & q , 0 , sizeof ( struct gfs2_quota ) ) ;
pos = qd2offset ( qd ) ;
2012-04-16 19:40:55 +04:00
error = gfs2_internal_read ( ip , ( char * ) & q , & pos , sizeof ( q ) ) ;
2009-09-23 16:50:49 +04:00
if ( error < 0 )
return error ;
2012-11-14 22:47:37 +04:00
qlvb = ( struct gfs2_quota_lvb * ) qd - > qd_gl - > gl_lksb . sb_lvbptr ;
2009-09-23 16:50:49 +04:00
qlvb - > qb_magic = cpu_to_be32 ( GFS2_MAGIC ) ;
qlvb - > __pad = 0 ;
qlvb - > qb_limit = q . qu_limit ;
qlvb - > qb_warn = q . qu_warn ;
qlvb - > qb_value = q . qu_value ;
qd - > qd_qb = * qlvb ;
return 0 ;
}
2006-01-16 19:50:04 +03:00
/**
 * do_glock - acquire a quota glock, refreshing the LVB copy if required
 * @qd: the quota data to lock
 * @force_refresh: FORCE to reread the quota entry from disk regardless of
 *                 whether the LVB already looks valid
 * @q_gh: holder to fill in for the acquired glock
 *
 * Normally takes the glock shared and trusts the lock value block. If the
 * LVB magic is wrong (never initialised) or a refresh is forced, the lock
 * is upgraded to exclusive/uncached, the entry is reread from the quota
 * file under the quota inode's shared glock, and then the whole sequence
 * restarts so the caller ends up holding the shared lock as usual.
 *
 * Returns: 0 with @q_gh held on success, or a negative errno
 */
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	/* Snapshot the lock value block while holding the glock */
	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		/* Only refresh once, then retake the shared lock */
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
2013-02-01 08:27:54 +04:00
/**
 * gfs2_quota_lock - lock the quota data relevant to an inode operation
 * @ip: the inode being operated on
 * @uid: the user id to account against
 * @gid: the group id to account against
 *
 * Holds the relevant quota data items for @ip, then (unless quotas are
 * off or the caller has CAP_SYS_RESOURCE) acquires each quota glock in
 * sorted order. On success GIF_QD_LOCKED is set on the inode; on failure
 * all glocks acquired so far are dropped and the hold is released.
 *
 * Returns: 0 on success or a negative errno
 */
int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	/* Quota enforcement disabled, or caller is exempt: hold only */
	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	/* Sorted acquisition order prevents lock-order deadlocks */
	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		/* A sync marked this entry stale; force a disk re-read */
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		/* Unwind the glocks acquired before the failure */
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync ( struct gfs2_quota_data * qd )
{
struct gfs2_sbd * sdp = qd - > qd_gl - > gl_sbd ;
struct gfs2_tune * gt = & sdp - > sd_tune ;
2006-09-04 20:49:07 +04:00
s64 value ;
2006-01-16 19:50:04 +03:00
unsigned int num , den ;
int do_sync = 1 ;
if ( ! qd - > qd_qb . qb_limit )
return 0 ;
2013-11-01 22:52:08 +04:00
spin_lock ( & qd_lock ) ;
2006-01-16 19:50:04 +03:00
value = qd - > qd_change ;
2013-11-01 22:52:08 +04:00
spin_unlock ( & qd_lock ) ;
2006-01-16 19:50:04 +03:00
spin_lock ( & gt - > gt_spin ) ;
num = gt - > gt_quota_scale_num ;
den = gt - > gt_quota_scale_den ;
spin_unlock ( & gt - > gt_spin ) ;
if ( value < 0 )
do_sync = 0 ;
2006-09-01 19:05:15 +04:00
else if ( ( s64 ) be64_to_cpu ( qd - > qd_qb . qb_value ) > =
( s64 ) be64_to_cpu ( qd - > qd_qb . qb_limit ) )
2006-01-16 19:50:04 +03:00
do_sync = 0 ;
else {
value * = gfs2_jindex_size ( sdp ) * num ;
2008-07-11 17:39:56 +04:00
value = div_s64 ( value , den ) ;
2006-09-01 19:05:15 +04:00
value + = ( s64 ) be64_to_cpu ( qd - > qd_qb . qb_value ) ;
2006-09-04 20:49:07 +04:00
if ( value < ( s64 ) be64_to_cpu ( qd - > qd_qb . qb_limit ) )
2006-01-16 19:50:04 +03:00
do_sync = 0 ;
}
return do_sync ;
}
/**
 * gfs2_quota_unlock - drop the quota glocks taken by gfs2_quota_lock()
 * @ip: the inode
 *
 * Drops each held quota glock, and for any entry whose pending change
 * warrants it (per need_sync()) and that passes qd_check_sync(), batches
 * it up (at most 4 — one per uid/gid pair) and syncs via do_sync().
 * Finally releases the quota hold.
 */
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			/* Couldn't read the change block; undo the
			   references qd_check_sync() took */
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
# define MAX_LINE 256
static int print_message ( struct gfs2_quota_data * qd , char * type )
{
struct gfs2_sbd * sdp = qd - > qd_gl - > gl_sbd ;
2009-09-28 15:49:15 +04:00
printk ( KERN_INFO " GFS2: fsid=%s: quota %s for %s %u \n " ,
2006-07-03 19:20:06 +04:00
sdp - > sd_fsname , type ,
2013-02-01 07:52:08 +04:00
( qd - > qd_id . type = = USRQUOTA ) ? " user " : " group " ,
from_kqid ( & init_user_ns , qd - > qd_id ) ) ;
2006-01-16 19:50:04 +03:00
return 0 ;
}
2013-02-01 08:27:54 +04:00
/**
 * gfs2_quota_check - check whether an allocation would break a quota
 * @ip: the inode (quota data must already be locked via gfs2_quota_lock)
 * @uid: the user id to check
 * @gid: the group id to check
 *
 * For each held quota data matching @uid or @gid, adds the local pending
 * change to the cluster-wide value from the LVB and compares against the
 * hard limit (-> -EDQUOT plus netlink warning) and the soft/warn limit
 * (-> rate-limited warning message only).
 *
 * Returns: 0 if under quota, -EDQUOT if the hard limit would be exceeded
 */
int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!(qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
			continue;

		/* Cluster-wide value from the LVB plus this node's
		   not-yet-synced local change */
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			/* Soft limit: warn at most once per warn period */
			quota_send_warning(qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
2006-09-04 20:49:07 +04:00
void gfs2_quota_change ( struct gfs2_inode * ip , s64 change ,
2013-02-01 08:27:54 +04:00
kuid_t uid , kgid_t gid )
2006-01-16 19:50:04 +03:00
{
struct gfs2_quota_data * qd ;
unsigned int x ;
2006-06-14 23:32:57 +04:00
if ( gfs2_assert_warn ( GFS2_SB ( & ip - > i_inode ) , change ) )
2006-01-16 19:50:04 +03:00
return ;
2008-11-04 13:05:22 +03:00
if ( ip - > i_diskflags & GFS2_DIF_SYSTEM )
2006-01-16 19:50:04 +03:00
return ;
2012-05-18 17:28:23 +04:00
for ( x = 0 ; x < ip - > i_res - > rs_qa_qd_num ; x + + ) {
qd = ip - > i_res - > rs_qa_qd [ x ] ;
2006-01-16 19:50:04 +03:00
2013-02-01 07:52:08 +04:00
if ( qid_eq ( qd - > qd_id , make_kqid_uid ( uid ) ) | |
qid_eq ( qd - > qd_id , make_kqid_gid ( gid ) ) ) {
2006-01-16 19:50:04 +03:00
do_qc ( qd , change ) ;
}
}
}
2012-07-03 18:45:28 +04:00
/**
 * gfs2_quota_sync - sync all dirty quota data to the quota file
 * @sb: the superblock
 * @type: unused here (quotactl interface parameter)
 *
 * Repeatedly fishes batches of dirty quota data items (up to max_qd at a
 * time) and writes them out via do_sync(), bumping sd_quota_sync_gen so
 * already-synced entries are skipped. Serialised by sd_quota_sync_mutex.
 *
 * Returns: 0 on success or a negative errno
 */
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE / sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		/* Collect a batch of dirty quota data items */
		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
		/* A full batch may mean more remain; go round again */
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}
2013-02-01 07:42:40 +04:00
int gfs2_quota_refresh ( struct gfs2_sbd * sdp , struct kqid qid )
2006-01-16 19:50:04 +03:00
{
struct gfs2_quota_data * qd ;
struct gfs2_holder q_gh ;
int error ;
2013-02-01 07:52:08 +04:00
error = qd_get ( sdp , qid , & qd ) ;
2006-01-16 19:50:04 +03:00
if ( error )
return error ;
error = do_glock ( qd , FORCE , & q_gh ) ;
if ( ! error )
gfs2_glock_dq_uninit ( & q_gh ) ;
qd_put ( qd ) ;
return error ;
}
/**
 * gfs2_quota_init - scan the per-node quota change file at mount time
 * @sdp: the filesystem
 *
 * Allocates the quota slot bitmap, then walks every block of the quota
 * change (qc) file. For each non-zero change record a gfs2_quota_data is
 * created with QDF_CHANGE set, its slot marked in the bitmap, and the qd
 * added to both the per-sb list and the global hash table. This is how
 * changes left over from a previous mount/crash are picked up.
 *
 * Returns: 0 on success or a negative errno (cleanup is performed on
 * failure via gfs2_quota_cleanup())
 */
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	u64 dblock;
	u32 extlen = 0;
	int error;

	/* Sanity-limit the qc file to 64MB */
	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		/* Map the next extent of the qc file when needed */
		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			/* Empty slot: nothing pending for this record */
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
/**
 * gfs2_quota_cleanup - free all quota data for a filesystem
 * @sdp: the filesystem
 *
 * Walks sd_quota_list tearing down every gfs2_quota_data: removes it from
 * the LRU and the hash table, drops its glock reference and frees it via
 * RCU. qd_lock is dropped and retaken around the per-entry work that must
 * not run under a spinlock. Finally frees the slot bitmap.
 */
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		/* At unmount time all references should be gone */
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
2008-11-17 17:25:37 +03:00
static void quotad_error ( struct gfs2_sbd * sdp , const char * msg , int error )
{
if ( error = = 0 | | error = = - EROFS )
return ;
if ( ! test_bit ( SDF_SHUTDOWN , & sdp - > sd_flags ) )
fs_err ( sdp , " gfs2_quotad: %s error %d \n " , msg , error ) ;
}
static void quotad_check_timeo ( struct gfs2_sbd * sdp , const char * msg ,
2009-09-11 17:36:44 +04:00
int ( * fxn ) ( struct super_block * sb , int type ) ,
2008-11-17 17:25:37 +03:00
unsigned long t , unsigned long * timeo ,
unsigned int * new_timeo )
{
if ( t > = * timeo ) {
2009-09-11 17:36:44 +04:00
int error = fxn ( sdp - > sd_vfs , 0 ) ;
2008-11-17 17:25:37 +03:00
quotad_error ( sdp , msg , error ) ;
* timeo = gfs2_tune_get_i ( & sdp - > sd_tune , new_timeo ) * HZ ;
} else {
* timeo - = t ;
}
}
2008-11-18 16:38:48 +03:00
static void quotad_check_trunc_list ( struct gfs2_sbd * sdp )
{
struct gfs2_inode * ip ;
while ( 1 ) {
ip = NULL ;
spin_lock ( & sdp - > sd_trunc_lock ) ;
if ( ! list_empty ( & sdp - > sd_trunc_list ) ) {
ip = list_entry ( sdp - > sd_trunc_list . next ,
struct gfs2_inode , i_trunc_list ) ;
list_del_init ( & ip - > i_trunc_list ) ;
}
spin_unlock ( & sdp - > sd_trunc_lock ) ;
if ( ip = = NULL )
return ;
gfs2_glock_finish_truncate ( ip ) ;
}
}
2009-10-20 11:39:44 +04:00
void gfs2_wake_up_statfs ( struct gfs2_sbd * sdp ) {
if ( ! sdp - > sd_statfs_force_sync ) {
sdp - > sd_statfs_force_sync = 1 ;
wake_up ( & sdp - > sd_quota_wait ) ;
}
}
2008-11-17 17:25:37 +03:00
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 * Kernel thread main loop: periodically syncs the master statfs file and
 * the quota file (intervals controlled by the gt_statfs_quantum and
 * gt_quota_quantum tunables), and finishes any truncates deferred onto
 * sd_trunc_list. Sleeps on sd_quota_wait between rounds unless woken for
 * a forced statfs sync or pending truncate work.
 *
 * Returns: 0 when the thread is stopped
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		/* Re-check wakeup conditions after queueing on the wait
		   queue so a concurrent wake-up is not lost */
		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
2009-09-11 18:57:27 +04:00
/**
 * gfs2_quota_get_xstate - report quota state (quotactl ->get_xstate)
 * @sb: the superblock
 * @fqs: structure to fill in with quota state
 *
 * Maps the mount's ar_quota mode onto XFS-style state flags: "on" implies
 * both enforcement and accounting, "account" implies accounting only.
 * GFS2 keeps user and group quotas in the same inode, so both stat
 * entries report the same inode.
 *
 * Returns: 0
 */
static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}
2012-09-16 13:07:49 +04:00
/**
 * gfs2_get_dqblk - read one quota record (quotactl ->get_dqblk)
 * @sb: the superblock
 * @qid: the user/group id to look up
 * @fdq: structure to fill in with the limits and usage
 *
 * Forces a fresh read of the quota entry via do_glock(FORCE), then
 * converts the big-endian LVB fields into @fdq, scaling from filesystem
 * blocks to basic (512-byte) blocks with sd_fsb2bb_shift.
 *
 * Returns: 0 on success, -ESRCH if quotas are off, -EINVAL for an
 * unsupported quota type, or a negative errno
 */
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
2009-09-23 16:50:49 +04:00
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

/**
 * gfs2_set_dqblk - update one quota record (quotactl ->set_dqblk)
 * @sb: the superblock
 * @qid: the user/group id to update
 * @fdq: the new limits/usage; d_fieldmask selects which fields apply
 *
 * Takes the quota glock and the quota inode glock exclusively, rereads
 * the current entry, drops from the fieldmask any field whose new value
 * equals the current one (so an unchanged request is a no-op), reserves
 * blocks if the write needs allocation, and applies the change via
 * gfs2_adjust_quota() inside a transaction.
 *
 * Returns: 0 on success, -ESRCH if quotas are off, -EINVAL for an
 * unsupported type or fieldmask, or a negative errno
 */
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		/* +1 for the unstuffing block */
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}
2009-09-15 12:59:02 +04:00
/* quotactl(2) operations exported by GFS2; wired up by the VFS */
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};
2013-12-12 14:47:59 +04:00
/**
 * gfs2_quota_hash_init - initialise the global quota data hash table
 *
 * Runs once at module init; sets up every bucket head of qd_hash_table.
 */
void __init gfs2_quota_hash_init(void)
{
	unsigned i = 0;

	while (i < GFS2_QD_HASH_SIZE)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i++]);
}