/*
 *  Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
#include "../internal.h"
/*
 * Recalculate the mask of events relevant to a given inode.  The caller
 * must hold inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
		new_mask |= mark->mask;
	inode->i_fsnotify_mask = new_mask;
}
/*
 * Recalculate inode->i_fsnotify_mask, the mask of all FS_* event types
 * that any listener is interested in hearing about for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	__fsnotify_update_child_dentry_flags(inode);
}
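
/*
 * Example (not compiled): a minimal sketch of how a backend might use the
 * recalculation helper after widening an existing mark's mask.  The
 * function name and the "extra" parameter are hypothetical; the fsnotify
 * helpers and fields are the real ones from this file.
 */
#if 0
static void example_widen_mark(struct fsnotify_mark *mark, __u32 extra)
{
	struct inode *inode = mark->i.inode;

	spin_lock(&mark->lock);
	mark->mask |= extra;		/* widen the per-mark event mask */
	spin_unlock(&mark->lock);

	/* fold the new mask into inode->i_fsnotify_mask */
	fsnotify_recalc_inode_mask(inode);
}
#endif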
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = mark->i.inode;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&mark->group->mark_lock);

	spin_lock(&inode->i_lock);

	hlist_del_init_rcu(&mark->i.i_list);
	mark->i.inode = NULL;

	/*
	 * This mark is now off the inode->i_fsnotify_marks list and we
	 * hold the inode->i_lock, so this is the perfect time to update
	 * the inode->i_fsnotify_mask.
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
}
/*
 * Given an inode, destroy all of the marks associated with that inode.
 */
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
	struct fsnotify_mark *mark, *lmark;
	struct hlist_node *pos, *n;
	LIST_HEAD(free_list);
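
	/*
	 * Two passes: unhook every mark from the inode under i_lock, then
	 * destroy the marks once the lock is dropped.
	 * fsnotify_destroy_mark() ends up taking inode->i_lock itself (via
	 * fsnotify_destroy_inode_mark() above), so it cannot run while we
	 * still hold the lock; the reference taken below keeps each mark
	 * alive across the gap.
	 */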
	spin_lock(&inode->i_lock);
	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
		list_add(&mark->i.free_i_list, &free_list);
		hlist_del_init_rcu(&mark->i.i_list);
		fsnotify_get_mark(mark);
	}
	spin_unlock(&inode->i_lock);

	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
		fsnotify_destroy_mark(mark);
		fsnotify_put_mark(mark);
	}
}
/*
 * Given a group, clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}
/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to that mark and return it; else return NULL.
 * The caller must hold inode->i_lock.
 */
struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
						      struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}
/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to that mark and return it; else return NULL.
 * This variant takes inode->i_lock itself.
 */
struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
					       struct inode *inode)
{
	struct fsnotify_mark *mark;

	spin_lock(&inode->i_lock);
	mark = fsnotify_find_inode_mark_locked(group, inode);
	spin_unlock(&inode->i_lock);

	return mark;
}
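
/*
 * Example (not compiled): a minimal sketch, with a hypothetical function
 * name, of how a backend might look up its mark on an inode and read its
 * mask.  fsnotify_find_inode_mark() returns with a reference held, so the
 * caller must drop it with fsnotify_put_mark().
 */
#if 0
static __u32 example_mark_mask(struct fsnotify_group *group,
			       struct inode *inode)
{
	struct fsnotify_mark *mark;
	__u32 mask = 0;

	mark = fsnotify_find_inode_mark(group, inode);
	if (mark) {
		mask = mark->mask;
		fsnotify_put_mark(mark);	/* drop the lookup reference */
	}
	return mask;
}
#endif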
/*
 * If we are setting a mark mask on an inode mark we should pin the inode
 * in memory.
 */
void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
					 __u32 mask)
{
	struct inode *inode;

	assert_spin_locked(&mark->lock);

	if (mask &&
	    mark->i.inode &&
	    !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
		mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
		inode = igrab(mark->i.inode);
		/*
		 * We shouldn't be able to get here if the inode wasn't
		 * already safely held in memory.  But BUG() in case it
		 * ever is wrong.
		 */
		BUG_ON(!inode);
	}
}
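
/*
 * Note on the lifetime of the pin above: the igrab() reference is expected
 * to be dropped on the mark-destruction path, which iput()s the inode when
 * FSNOTIFY_MARK_FLAG_OBJECT_PINNED is set (see fsnotify_destroy_mark()).
 */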
/*
 * Attach an initialized mark to a given inode.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.  These
 * marks are ordered according to priority, highest number first, and then by
 * the group's location in memory.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
			    struct fsnotify_group *group, struct inode *inode,
			    int allow_dups)
{
	struct fsnotify_mark *lmark;
	struct hlist_node *node, *last = NULL;
	int ret = 0;

	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&group->mark_lock);

	spin_lock(&inode->i_lock);

	mark->i.inode = inode;

	/* is mark the first mark? */
	if (hlist_empty(&inode->i_fsnotify_marks)) {
		hlist_add_head_rcu(&mark->i.i_list, &inode->i_fsnotify_marks);
		goto out;
	}

	/* should mark be in the middle of the current list? */
	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) {
		last = node;

		if ((lmark->group == group) && !allow_dups) {
			ret = -EEXIST;
			goto out;
		}

		if (mark->group->priority < lmark->group->priority)
			continue;

		if ((mark->group->priority == lmark->group->priority) &&
		    (mark->group < lmark->group))
			continue;

		hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
		goto out;
	}

	BUG_ON(last == NULL);
	/* mark should be the last entry.  last is the current last entry */
	hlist_add_after_rcu(last, &mark->i.i_list);
out:
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	return ret;
}
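
/*
 * Example (not compiled): a minimal sketch of how a backend attaches an
 * inode mark.  Backends do not call fsnotify_add_inode_mark() directly;
 * they go through fsnotify_add_mark(), which takes the mark and group
 * locks and then dispatches here for inode marks.  example_watch_inode()
 * and example_free_mark() are hypothetical and assume <linux/slab.h>.
 */
#if 0
static void example_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static int example_watch_inode(struct fsnotify_group *group,
			       struct inode *inode, __u32 mask)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, example_free_mark);
	mark->mask = mask;

	/* NULL vfsmount, no duplicate marks allowed */
	ret = fsnotify_add_mark(mark, group, inode, NULL, 0);
	if (ret)
		fsnotify_put_mark(mark);	/* drop the allocation ref on failure */
	return ret;
}
#endif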

/**
 * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called during unmount with no locks held, so needs to be safe against
 * concurrent modifiers.  We temporarily drop inode_sb_list_lock and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

		/*
		 * We cannot __iget() an inode in state I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		spin_unlock(&inode->i_lock);

		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count)) {
			spin_lock(&next_i->i_lock);
			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
				__iget(next_i);
				need_iput = next_i;
			}
			spin_unlock(&next_i->i_lock);
		}

		/*
		 * We can safely drop inode_sb_list_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added to the list since the umount has begun.
		 */
		spin_unlock(&inode_sb_list_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
}