/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Basic idea behind the notification queue: An fsnotify group (like inotify)
 * sends the userspace notification about events asynchronously some time after
 * the event happened.  When inotify gets an event it will need to add that
 * event to the group notify queue.  Since a single event might need to be on
 * multiple groups' notification queues we can't add the event directly to each
 * queue and instead add a small "event_holder" to each queue.  This event_holder
 * has a pointer back to the original event.  Since the majority of events are
 * going to end up on one, and only one, notification queue we embed one
 * event_holder into each event.  This means we have a single allocation instead
 * of always needing two.  If the embedded event_holder is already in use by
 * another group a new event_holder (from fsnotify_event_holder_cachep) will be
 * allocated and used.
 */
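
/*
 * A simplified sketch of that scheme, for orientation only; the real
 * structure lives in include/linux/fsnotify_backend.h and may carry more
 * state than shown here:
 *
 *	struct fsnotify_event {
 *		struct list_head list;	(the embedded "holder" which links
 *					 the event into one notification_list)
 *		struct inode *inode;
 *		u32 mask;
 *	};
 *
 * Because the list head is embedded, list_empty(&event->list) tells us
 * whether the event is currently queued on some group's list - the check
 * that fsnotify_destroy_event() and fsnotify_add_event() below rely on.
 */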

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"

static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);

/**
 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
 * Called from fsnotify_move, which is inlined into filesystem modules.
 */
u32 fsnotify_get_cookie(void)
{
	return atomic_inc_return(&fsnotify_sync_cookie);
}
EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
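
/*
 * Illustrative use, roughly what fsnotify_move() in
 * include/linux/fsnotify.h does to tie the two halves of a rename
 * together (arguments abbreviated):
 *
 *	u32 cookie = fsnotify_get_cookie();
 *
 *	fsnotify(old_dir, FS_MOVED_FROM, ..., cookie);
 *	fsnotify(new_dir, FS_MOVED_TO, ..., cookie);
 */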

/* return true if the notify queue is empty, false otherwise */
bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);
	return list_empty(&group->notification_list) ? true : false;
}

void fsnotify_destroy_event(struct fsnotify_group *group,
			    struct fsnotify_event *event)
{
	/* Overflow events are per-group and we don't want to free them */
	if (!event || event->mask == FS_Q_OVERFLOW)
		return;
	/*
	 * If the event is still queued, we have a problem... Do an unreliable
	 * lockless check first to avoid locking in the common case. The
	 * locking may be necessary for permission events which got removed
	 * from the list by a different CPU than the one freeing the event.
	 */
	if (!list_empty(&event->list)) {
		spin_lock(&group->notification_lock);
		WARN_ON(!list_empty(&event->list));
		spin_unlock(&group->notification_lock);
	}
	group->ops->free_event(event);
}

/*
 * Add an event to the group notification queue.  The group can later pull this
 * event off the queue to deal with.  The function returns 0 if the event was
 * added to the queue, 1 if the event was merged with some other queued event,
 * 2 if the event was not queued - either the queue of events has overflown
 * or the group is shutting down.
 */
int fsnotify_add_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       int (*merge)(struct list_head *,
				    struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	spin_lock(&group->notification_lock);

	if (group->shutdown) {
		spin_unlock(&group->notification_lock);
		return 2;
	}

	if (event == group->overflow_event ||
	    group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			spin_unlock(&group->notification_lock);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	spin_unlock(&group->notification_lock);

	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
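
/*
 * Sketch of a hypothetical caller, mirroring what backends like inotify
 * do in their handle_event implementations: on a non-zero return the
 * event was merged (1) or dropped (2), so it never entered the queue and
 * the caller still owns it and must dispose of it itself:
 *
 *	ret = fsnotify_add_event(group, event, my_merge);
 *	if (ret)
 *		fsnotify_destroy_event(group, event);
 */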

/*
 * Remove and return the first event from the notification list.  It is the
 * responsibility of the caller to destroy the obtained event
 */
struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	assert_spin_locked(&group->notification_lock);

	pr_debug("%s: group=%p\n", __func__, group);

	event = list_first_entry(&group->notification_list,
				 struct fsnotify_event, list);
	/*
	 * We need to init list head for the case of overflow event so that
	 * check in fsnotify_add_event() works
	 */
	list_del_init(&event->list);
	group->q_len--;

	return event;
}

/*
 * This will not remove the event; that must be done with
 * fsnotify_remove_first_event()
 */
struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
{
	assert_spin_locked(&group->notification_lock);

	return list_first_entry(&group->notification_list,
				struct fsnotify_event, list);
}
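
/*
 * Typical consumer pattern, roughly how the read() implementations of
 * the notification backends drain the queue: peek under the lock to
 * inspect the event (e.g. to size-check the user buffer), then remove:
 *
 *	spin_lock(&group->notification_lock);
 *	while (!fsnotify_notify_queue_is_empty(group)) {
 *		event = fsnotify_peek_first_event(group);
 *		(decide whether the event fits / should be consumed)
 *		event = fsnotify_remove_first_event(group);
 *	}
 *	spin_unlock(&group->notification_lock);
 */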

/*
 * Called when a group is being torn down to clean up any outstanding
 * event notifications.
 */
void fsnotify_flush_notify(struct fsnotify_group *group)
{
	struct fsnotify_event *event;

	spin_lock(&group->notification_lock);
	while (!fsnotify_notify_queue_is_empty(group)) {
		event = fsnotify_remove_first_event(group);
		spin_unlock(&group->notification_lock);
		fsnotify_destroy_event(group, event);
		spin_lock(&group->notification_lock);
	}
	spin_unlock(&group->notification_lock);
}

/*
 * fsnotify_init_event - Initialize an event which will later be handed to
 * each group's handle_event function if the group was interested in this
 * particular event.
 *
 * @event	the event to initialize
 * @inode	the inode which is supposed to receive the event (sometimes a
 *		parent of the inode to which the event happened)
 * @mask	what actually happened
 */
void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
			 u32 mask)
{
	INIT_LIST_HEAD(&event->list);
	event->inode = inode;
	event->mask = mask;
}
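
/*
 * Illustrative flow with hypothetical names: a backend typically embeds
 * struct fsnotify_event in its own, larger event structure, initializes
 * the common part here and then queues it:
 *
 *	struct my_event *ev = kmem_cache_alloc(my_event_cachep, GFP_KERNEL);
 *
 *	fsnotify_init_event(&ev->fse, inode, mask);
 *	fsnotify_add_event(group, &ev->fse, my_merge);
 */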