2006-01-16 19:50:04 +03:00
/*
* Copyright ( C ) Sistina Software , Inc . 1997 - 2003 All rights reserved .
2007-12-12 03:51:25 +03:00
* Copyright ( C ) 2004 - 2007 Red Hat , Inc . All rights reserved .
2006-01-16 19:50:04 +03:00
*
* This copyrighted material is made available to anyone wishing to use ,
* modify , copy , or redistribute it subject to the terms and conditions
2006-09-01 19:05:15 +04:00
* of the GNU General Public License version 2.
2006-01-16 19:50:04 +03:00
*/
/*
* Quota change tags are associated with each transaction that allocates or
* deallocates space . Those changes are accumulated locally to each node ( in a
* per - node file ) and then are periodically synced to the quota file . This
* avoids the bottleneck of constantly touching the quota file , but introduces
* fuzziness in the current usage value of IDs that are being used on different
* nodes in the cluster simultaneously . So , it is possible for a user on
* multiple nodes to overrun their quota, but that overrun is controllable.
2009-09-15 23:42:56 +04:00
* Since quota tags are part of transactions , there is no need for a quota check
2006-01-16 19:50:04 +03:00
* program to be run on node crashes or anything like that .
*
* There are couple of knobs that let the administrator manage the quota
* fuzziness . " quota_quantum " sets the maximum time a quota change can be
* sitting on one node before being synced to the quota file . ( The default is
* 60 seconds . ) Another knob , " quota_scale " controls how quickly the frequency
* of quota file syncs increases as the user moves closer to their limit . The
* more frequent the syncs , the more accurate the quota enforcement , but that
* means that there is more contention between the nodes for the quota file .
* The default value is one . This sets the maximum theoretical quota overrun
* ( with infinite node with infinite bandwidth ) to twice the user ' s limit . ( In
* practice , the maximum overrun you see should be much less . ) A " quota_scale "
* number greater than one makes quota syncs more frequent and reduces the
* maximum overrun . Numbers less than one ( but greater than zero ) make quota
* syncs less frequent .
*
* GFS quotas also use per - ID Lock Value Blocks ( LVBs ) to cache the contents of
* the quota file , so it is not being constantly read .
*/
# include <linux/sched.h>
# include <linux/slab.h>
2011-05-25 04:12:27 +04:00
# include <linux/mm.h>
2006-01-16 19:50:04 +03:00
# include <linux/spinlock.h>
# include <linux/completion.h>
# include <linux/buffer_head.h>
# include <linux/sort.h>
2006-02-08 14:50:51 +03:00
# include <linux/fs.h>
2006-10-02 19:38:25 +04:00
# include <linux/bio.h>
2006-02-28 01:23:27 +03:00
# include <linux/gfs2_ondisk.h>
2008-11-17 17:25:37 +03:00
# include <linux/kthread.h>
# include <linux/freezer.h>
2009-09-28 15:49:15 +04:00
# include <linux/quota.h>
2009-09-11 18:57:27 +04:00
# include <linux/dqblk_xfs.h>
2006-01-16 19:50:04 +03:00
# include "gfs2.h"
2006-02-28 01:23:27 +03:00
# include "incore.h"
2006-01-16 19:50:04 +03:00
# include "bmap.h"
# include "glock.h"
# include "glops.h"
# include "log.h"
# include "meta_io.h"
# include "quota.h"
# include "rgrp.h"
# include "super.h"
# include "trans.h"
2006-02-08 14:50:51 +03:00
# include "inode.h"
2006-02-28 01:23:27 +03:00
# include "util.h"
2006-01-16 19:50:04 +03:00
# define QUOTA_USER 1
# define QUOTA_GROUP 0
2007-06-01 17:11:58 +04:00
/* In-core (host-endian) image of one on-disk gfs2_quota_change record */
struct gfs2_quota_change_host {
	u64 qc_change;	/* accumulated block-count delta for this ID */
	u32 qc_flags;	/* GFS2_QCF_... */
	u32 qc_id;	/* user or group ID the change applies to */
};
2009-01-08 01:03:37 +03:00
/* Global LRU of quota_data objects whose refcount has reached zero and
 * which are therefore eligible for reclaim by the shrinker */
static LIST_HEAD(qd_lru_list);
/* Number of entries currently on qd_lru_list */
static atomic_t qd_lru_count = ATOMIC_INIT(0);
/* Protects qd_lru_list, the per-sb sd_quota_list, qd_change and the
 * slot bitmap/counters */
static DEFINE_SPINLOCK(qd_lru_lock);
2011-05-25 04:12:27 +04:00
/**
 * gfs2_shrink_qd_memory - memory-shrinker callback for quota_data objects
 * @shrink: the registered shrinker (unused here)
 * @sc: scan control: how many entries to scan and with what gfp mask
 *
 * Frees up to sc->nr_to_scan zero-refcount quota_data objects from the
 * global LRU.  Refuses (-1) when the allocation context cannot recurse
 * into the filesystem (!__GFP_FS).
 *
 * Returns: remaining reclaimable entries scaled by vfs_cache_pressure
 */
int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	if (!(sc->gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		/* An entry on the LRU must have no pending change, slot
		 * or buffer references */
		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		/* Drop the lock around the free; the entry is already
		 * unreachable from both lists */
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}
2006-09-04 20:49:07 +04:00
static u64 qd2offset ( struct gfs2_quota_data * qd )
2006-01-16 19:50:04 +03:00
{
2006-09-04 20:49:07 +04:00
u64 offset ;
2006-01-16 19:50:04 +03:00
2006-09-04 20:49:07 +04:00
offset = 2 * ( u64 ) qd - > qd_id + ! test_bit ( QDF_USER , & qd - > qd_flags ) ;
2006-01-16 19:50:04 +03:00
offset * = sizeof ( struct gfs2_quota ) ;
return offset ;
}
2006-09-04 20:49:07 +04:00
/* Allocate and initialize a fresh quota_data for (user, id) with one
 * reference and its quota glock held; not yet on any list.  The glock
 * number encodes both the ID and user/group, mirroring qd2offset(). */
static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);	/* caller's reference */
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;		/* no quota-change slot reserved yet */
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}
2009-09-15 19:30:38 +04:00
/* Find (or create) the quota_data for (user, id) on this sb and take a
 * reference.  Allocation happens outside qd_lru_lock, so lookup and
 * insertion race; a losing speculative allocation is freed. */
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				/* A zero-count entry may be parked on the
				 * LRU; rescue it before reusing it */
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		/* Not present: publish the entry allocated last time round */
		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				/* Lost the race: drop the spare entry */
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		/* Nothing found: allocate outside the lock and retry */
		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}
/* Take an extra reference; only legal while a reference is already held
 * (asserted), so the entry cannot be sitting on the reclaim LRU */
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}
/* Drop a reference; the final one parks the entry on the global reclaim
 * LRU (under qd_lru_lock) instead of freeing it immediately */
static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}
/* Reserve a slot in the per-node quota_change file for this quota_data.
 * Slots are tracked in sd_quota_bitmap (PAGE_SIZE bytes per chunk); only
 * the first reference actually claims a bit, later calls just bump the
 * count.  Returns -ENOSPC when no slot is free. */
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	/* Already have a slot: just another reference */
	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	/* Scan the bitmap for the first byte with a clear bit */
	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;	/* undo the speculative increment above */
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}
/* Take an extra reference on an already-reserved change-file slot */
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}
/* Drop a slot reference; the last one clears the slot's bitmap bit and
 * marks the quota_data as having no slot */
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}
/* Pin the quota_change-file buffer backing this quota_data's slot and
 * point qd_bh_qc at the slot's on-disk record.  Reference counted under
 * sd_quota_mutex: only the first holder maps and reads the block. */
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	/* Locate the slot's record within the quota_change file */
	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;	/* undo the speculative increment */
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
/* Drop a reference on the pinned change-file buffer; the last reference
 * releases the buffer and clears the cached record pointer */
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
/* Pick one dirty (QDF_CHANGE) quota_data that still needs syncing to the
 * quota file, mark it QDF_LOCKED and return it with extra qd, slot and
 * bh references taken.  *qdp is left NULL when nothing needs syncing or
 * the filesystem is read-only. */
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		/* Rotate to the tail so other entries get picked next */
		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;	/* snapshot the delta */
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			/* Roll back the references taken above */
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
/* Like qd_fish() but for one specific quota_data: if it is dirty and not
 * already being synced, lock it and take the sync references.
 * Returns 1 on success, 0 when there is nothing to do or bh_get() fails. */
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;	/* snapshot the delta to sync */
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		/* Could not pin the change-file buffer: roll back */
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
/* Release a quota_data locked by qd_fish()/qd_trylock(): clear QDF_LOCKED
 * and drop the bh, slot and qd references taken when it was locked */
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
2009-09-15 19:25:40 +04:00
/* Acquire a fully usable quota_data: reference + change-file slot +
 * pinned change-file buffer.  Undone by qdsb_put().  Cleanup on error is
 * in strict reverse order of acquisition. */
static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put ( struct gfs2_quota_data * qd )
{
bh_put ( qd ) ;
slot_put ( qd ) ;
qd_put ( qd ) ;
}
2006-09-04 20:49:07 +04:00
/**
 * gfs2_quota_hold - attach the quota_data objects an operation may change
 * @ip: the inode
 * @uid: new owner uid, or NO_QUOTA_CHANGE
 * @gid: new owner gid, or NO_QUOTA_CHANGE
 *
 * Fills ip->i_res->rs_qa_qd with the inode's current user and group
 * quota_data objects, plus those for the new uid/gid when ownership is
 * changing.  On any failure everything acquired so far is released via
 * gfs2_quota_unhold().
 *
 * Returns: errno
 */
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		/* Fix: the return value of gfs2_rs_alloc() was ignored, so
		 * an allocation failure left ip->i_res NULL and the
		 * dereference below oopsed.  Propagate the error instead
		 * (same pattern as do_sync()). */
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	/* Current owner's user quota */
	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	/* Current owner's group quota */
	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	/* New user quota when chown changes the uid */
	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	/* New group quota when chgrp changes the gid */
	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold ( struct gfs2_inode * ip )
{
2006-06-14 23:32:57 +04:00
struct gfs2_sbd * sdp = GFS2_SB ( & ip - > i_inode ) ;
2006-01-16 19:50:04 +03:00
unsigned int x ;
2012-05-18 17:28:23 +04:00
if ( ip - > i_res = = NULL )
return ;
2006-01-16 19:50:04 +03:00
gfs2_assert_warn ( sdp , ! test_bit ( GIF_QD_LOCKED , & ip - > i_flags ) ) ;
2012-05-18 17:28:23 +04:00
for ( x = 0 ; x < ip - > i_res - > rs_qa_qd_num ; x + + ) {
qdsb_put ( ip - > i_res - > rs_qa_qd [ x ] ) ;
ip - > i_res - > rs_qa_qd [ x ] = NULL ;
2006-01-16 19:50:04 +03:00
}
2012-05-18 17:28:23 +04:00
ip - > i_res - > rs_qa_qd_num = 0 ;
2006-01-16 19:50:04 +03:00
}
static int sort_qd ( const void * a , const void * b )
{
2006-09-05 23:17:12 +04:00
const struct gfs2_quota_data * qd_a = * ( const struct gfs2_quota_data * * ) a ;
const struct gfs2_quota_data * qd_b = * ( const struct gfs2_quota_data * * ) b ;
2006-01-16 19:50:04 +03:00
if ( ! test_bit ( QDF_USER , & qd_a - > qd_flags ) ! =
! test_bit ( QDF_USER , & qd_b - > qd_flags ) ) {
if ( test_bit ( QDF_USER , & qd_a - > qd_flags ) )
2006-09-05 23:17:12 +04:00
return - 1 ;
2006-01-16 19:50:04 +03:00
else
2006-09-05 23:17:12 +04:00
return 1 ;
2006-01-16 19:50:04 +03:00
}
2006-09-05 23:17:12 +04:00
if ( qd_a - > qd_id < qd_b - > qd_id )
return - 1 ;
if ( qd_a - > qd_id > qd_b - > qd_id )
return 1 ;
2006-01-16 19:50:04 +03:00
2006-09-05 23:17:12 +04:00
return 0 ;
2006-01-16 19:50:04 +03:00
}
2006-09-04 20:49:07 +04:00
/* Record a block-count delta for one ID in this node's quota_change file
 * and mirror the running total in qd->qd_change.  Transitions on/off the
 * dirty (QDF_CHANGE) state take/drop the qd and slot references that keep
 * a dirty entry alive until it is synced.  Caller must be inside a
 * transaction (gfs2_trans_add_bh). */
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* First change in this slot: initialize the on-disk record */
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		/* Delta cancelled out: the slot is no longer dirty */
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		/* Became dirty: pin the entry and its slot until synced */
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
2006-02-08 14:50:51 +03:00
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record (may be NULL)
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	/* The page-based path below needs real data blocks */
	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	/* Read the current on-disk record, apply the delta, and mirror the
	 * new value into the cached LVB copy */
	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;	/* default for the buffer failures below */
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		/* Fold in any administrator-supplied limit updates,
		 * converting from fs blocks to fs-native block size */
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Walk to the buffer_head covering `offset` within the page */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page);
	/* Clamp the copy to this page; any remainder is written on the
	 * next pass via get_a_page */
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	/* Extend the quota file's size if this record grew it */
	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
2006-01-16 19:50:04 +03:00
/* Sync a batch of dirty quota_data objects to the quota file: lock all of
 * their glocks plus the quota inode, reserve blocks, then in one
 * transaction write each record via gfs2_adjust_quota() and cancel the
 * local delta via do_qc().  Cleanup unwinds in reverse via gotos. */
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	/* Lock in sorted order to avoid deadlocking with other nodes */
	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	/* Count how many records will need fresh block allocations */
	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	error = gfs2_inplace_reserve(ip, 1 +
				     (nalloc * (data_blocks + ind_blocks)));
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		/* Cancel the per-node delta that was just folded in */
		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
2009-09-23 16:50:49 +04:00
/* Re-read one quota record from the quota file and refresh both the
 * glock's Lock Value Block and the cached copy in qd->qd_qb */
static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	/* Limits/value stay in on-disk (big-endian) byte order in the LVB */
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}
2006-01-16 19:50:04 +03:00
/* Acquire a shared glock on the quota_data, first refreshing the LVB from
 * the quota file if forced or if the cached LVB has no valid magic.  The
 * refresh path re-takes the glock exclusively (plus a shared lock on the
 * quota inode), updates the LVB, then restarts in shared mode. */
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;	/* refresh done; take shared lock */
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
2006-09-04 20:49:07 +04:00
/*
 * gfs2_quota_lock - hold and lock all quotas associated with a transaction
 * @ip: the inode whose uid/gid quotas are needed
 * @uid: the user id
 * @gid: the group id
 *
 * Takes references on the relevant quota_data structures, then acquires
 * their glocks.  The array is sorted first so every node acquires the
 * glocks in the same order — presumably for deadlock avoidance (sort_qd
 * is defined elsewhere in this file; TODO confirm ordering criteria).
 * Skipped entirely for CAP_SYS_RESOURCE or when enforcement is off.
 *
 * Returns: 0 with GIF_QD_LOCKED set on success; on error all holds and
 * glocks taken so far are released.
 */
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		/* A pending refresh request forces a re-read from disk */
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		/* Unwind the glocks acquired before the failure */
		while (x--)
			gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
/*
 * need_sync - decide whether a quota's local changes should be synced now
 * @qd: the quota data
 *
 * Heuristic: project the local change onto the whole cluster by scaling
 * it by the journal count times the quota_scale_num/den tunable, and sync
 * if the projected total would reach the hard limit.  Never syncs when
 * there is no limit, when the change is negative (a deallocation), or
 * when the stored value already exceeds the limit.
 *
 * Returns: 1 if the quota should be synced to the quota file, 0 otherwise.
 */
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	/* qd_change is protected by qd_lru_lock */
	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		/* Scale local change by (journal count * quota_scale) */
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}
/*
 * gfs2_quota_unlock - release quota glocks, syncing dirty quotas if needed
 * @ip: the inode whose quotas were locked by gfs2_quota_lock()
 *
 * Drops each quota glock; any quota that need_sync() says is close to its
 * limit is opportunistically synced (qd_trylock, so a quota busy elsewhere
 * is simply skipped).  Always drops the gfs2_quota_hold() references.
 */
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	/* At most 4 quotas can be attached (uid/gid pairs) — see qda size */
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
# define MAX_LINE 256
static int print_message ( struct gfs2_quota_data * qd , char * type )
{
struct gfs2_sbd * sdp = qd - > qd_gl - > gl_sbd ;
2009-09-28 15:49:15 +04:00
printk ( KERN_INFO " GFS2: fsid=%s: quota %s for %s %u \n " ,
2006-07-03 19:20:06 +04:00
sdp - > sd_fsname , type ,
( test_bit ( QDF_USER , & qd - > qd_flags ) ) ? " user " : " group " ,
qd - > qd_id ) ;
2006-01-16 19:50:04 +03:00
return 0 ;
}
2006-09-04 20:49:07 +04:00
/*
 * gfs2_quota_check - check whether an allocation would violate quota
 * @ip: the inode whose quotas are locked (GIF_QD_LOCKED must be set)
 * @uid: the user id to check
 * @gid: the group id to check
 *
 * For each locked quota matching @uid/@gid, adds the pending local change
 * to the cluster-wide value from the LVB and compares against the hard and
 * soft (warn) limits.  Exceeding the hard limit logs, sends a netlink
 * warning and returns -EDQUOT; exceeding the soft limit only warns, rate
 * limited by gt_quota_warn_period.
 *
 * Returns: 0 if within quota, -EDQUOT if the hard limit is exceeded.
 */
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		/* Cluster-wide value plus this node's not-yet-synced delta */
		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
2006-09-04 20:49:07 +04:00
/*
 * gfs2_quota_change - record a block allocation/deallocation against quota
 * @ip: the inode the change belongs to
 * @change: signed number of blocks allocated (+) or freed (-)
 * @uid: the user id to charge
 * @gid: the group id to charge
 *
 * Applies the delta via do_qc() to every held quota matching the uid or
 * gid.  A zero change triggers an assert warning; system files
 * (GFS2_DIF_SYSTEM) are exempt from quota accounting.
 */
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	unsigned int i;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (i = 0; i < ip->i_res->rs_qa_qd_num; i++) {
		struct gfs2_quota_data *qd = ip->i_res->rs_qa_qd[i];
		int is_user = test_bit(QDF_USER, &qd->qd_flags);

		if ((is_user && qd->qd_id == uid) ||
		    (!is_user && qd->qd_id == gid))
			do_qc(qd, change);
	}
}
2012-07-03 18:45:28 +04:00
/*
 * gfs2_quota_sync - write all dirty quota changes into the quota file
 * @sb: the super block
 * @type: quota type (unused here; kept for the quotactl_ops signature)
 *
 * Repeatedly "fishes" dirty quota_data structures (qd_fish) in batches of
 * at most gt_quota_simul_sync, writes each batch with do_sync(), and tags
 * successfully synced quotas with the new sd_quota_sync_gen generation.
 * Loops until a batch comes back short (no more dirty quotas) or an error
 * occurs.
 *
 * Returns: 0 on success, or a negative error code.
 */
int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
2010-02-16 11:44:52 +03:00
/*
 * gfs2_quota_sync_timeo - periodic quota sync entry point for quotad
 *
 * Thin wrapper so gfs2_quota_sync() can be passed as the callback to
 * quotad_check_timeo(), which expects an int (*)(struct super_block *, int).
 */
static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type);
}
2006-09-04 20:49:07 +04:00
/*
 * gfs2_quota_refresh - force a re-read of one quota from the quota file
 * @sdp: the filesystem
 * @user: non-zero for a user quota, zero for a group quota
 * @id: the uid or gid
 *
 * Looks up (or creates) the quota_data, then takes its glock with FORCE
 * so do_glock() rebuilds the cached LVB from disk, and drops everything.
 *
 * Returns: 0 on success, or a negative error code.
 */
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}
2007-06-01 17:11:58 +04:00
/*
 * gfs2_quota_change_in - unpack an on-disk quota change record
 * @qc: host-endian destination structure
 * @buf: raw big-endian struct gfs2_quota_change from a quota-change block
 */
static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}
2006-01-16 19:50:04 +03:00
/*
 * gfs2_quota_init - scan the quota-change file at mount time
 * @sdp: the filesystem
 *
 * Walks every block of the per-node quota-change inode, rebuilding the
 * in-core state left over from before the last unmount/crash: for each
 * non-zero change record a quota_data is allocated, flagged QDF_CHANGE,
 * given its slot, and added to sd_quota_list.  Also allocates the slot
 * bitmap (one page per chunk) used to track quota-change slots.
 *
 * Returns: 0 on success; on error everything built so far is torn down
 * via gfs2_quota_cleanup().
 */
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	/* Sanity-check the quota-change file size (block aligned, <= 64M) */
	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	/* One bit per slot, PAGE_SIZE bytes per bitmap chunk */
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		/* Map the next extent of the quota-change file */
		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			/* Zero change => slot unused, nothing to recover */
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			/* Mark the slot in use and publish the qd */
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
/*
 * gfs2_quota_cleanup - tear down all in-core quota state
 * @sdp: the filesystem
 *
 * Drains sd_quota_list, freeing each quota_data once its reference count
 * allows; a still-referenced qd is rotated to the head of the list and we
 * schedule() to let the holder finish.  Also unhooks each qd from the
 * reclaim (LRU) list and finally frees the slot bitmap.
 */
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		/* Busy qd: a QDF_CHANGE qd legitimately holds one reference,
		   anything beyond that means someone else still uses it */
		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
2008-11-17 17:25:37 +03:00
static void quotad_error ( struct gfs2_sbd * sdp , const char * msg , int error )
{
if ( error = = 0 | | error = = - EROFS )
return ;
if ( ! test_bit ( SDF_SHUTDOWN , & sdp - > sd_flags ) )
fs_err ( sdp , " gfs2_quotad: %s error %d \n " , msg , error ) ;
}
/*
 * quotad_check_timeo - run a periodic sync job once its timeout expires
 * @sdp: the filesystem
 * @msg: job name used in error messages
 * @fxn: the sync function to invoke (quota type argument is passed as 0)
 * @t: jiffies elapsed since quotad last checked
 * @timeo: remaining countdown; reset from @new_timeo (seconds) after a run
 * @new_timeo: tunable holding the job's period in seconds
 */
static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}
2008-11-18 16:38:48 +03:00
/*
 * quotad_check_trunc_list - finish truncates queued for the quotad thread
 * @sdp: the filesystem
 *
 * Pops inodes off sd_trunc_list one at a time under sd_trunc_lock and
 * completes their truncation.  Each entry is removed before the lock is
 * dropped, so concurrent queuers are safe.
 */
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}
2009-10-20 11:39:44 +04:00
void gfs2_wake_up_statfs ( struct gfs2_sbd * sdp ) {
if ( ! sdp - > sd_statfs_force_sync ) {
sdp - > sd_statfs_force_sync = 1 ;
wake_up ( & sdp - > sd_quota_wait ) ;
}
}
2008-11-17 17:25:37 +03:00
/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @data: Pointer to GFS2 superblock
 *
 * Kernel thread main loop: periodically syncs the master statfs file and
 * the quota file (periods from the gt_statfs_quantum / gt_quota_quantum
 * tunables), completes queued truncates, and sleeps until the shorter of
 * the two timeouts — or until woken for a forced statfs sync or a newly
 * queued truncate.
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		/* Re-check wake conditions after prepare_to_wait so a wakeup
		   between the checks and schedule_timeout() is not lost */
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;	/* there is pending work; loop immediately */
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}
2009-09-11 18:57:27 +04:00
/*
 * gfs2_quota_get_xstate - report quota state (XFS-style quotactl)
 * @sb: the super block
 * @fqs: filled in with accounting/enforcement flags, quota inode info,
 *       and the count of in-core quota structures
 *
 * Returns: 0 (always succeeds).
 */
static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/* Enforcement implies accounting */
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}

	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}
2012-09-16 13:07:49 +04:00
/*
 * gfs2_get_dqblk - read one quota's limits and usage (XFS-style quotactl)
 * @sb: the super block
 * @qid: kernel quota id (carries the type and the uid/gid)
 * @fdq: filled in with hard/soft limits and block count, converted from
 *       filesystem blocks to basic (512-byte) blocks via sd_fsb2bb_shift
 *
 * Takes the quota glock with FORCE so the values come fresh from disk.
 *
 * Returns: 0 on success, -ESRCH if quotas are off, -EINVAL for an unknown
 * quota type, or another negative error.
 */
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;
	int type;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (qid.type == USRQUOTA)
		type = QUOTA_USER;
	else if (qid.type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid(&init_user_ns, qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}
2009-09-23 16:50:49 +04:00
/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

/*
 * gfs2_set_dqblk - update one quota's limits/usage (XFS-style quotactl)
 * @sb: the super block
 * @qid: kernel quota id (type plus uid/gid)
 * @fdq: requested values; d_fieldmask selects which fields to apply and
 *       is trimmed in place of fields that already match the stored value
 *
 * Validates the request, locks the quota and quota-inode glocks
 * exclusively, refreshes the stored values with update_qd(), and — if
 * anything actually changed — reserves blocks as needed and writes the
 * change inside a transaction via gfs2_adjust_quota().
 *
 * Returns: 0 on success (including the no-op case), or a negative error.
 */
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;
	int type;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch(qid.type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != FS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != FS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != from_kqid(&init_user_ns, qid))
		return -EINVAL;

	error = qd_get(sdp, type, from_kqid(&init_user_ns, qid), &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	/* (requested values arrive in basic blocks; stored in fs blocks) */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, blocks);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}
2009-09-15 12:59:02 +04:00
/* Quota operations exported to the VFS (XFS-style quotactl interface) */
const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk      = gfs2_get_dqblk,
	.set_dqblk      = gfs2_set_dqblk,
};