/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
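/*
 * Each subscription keeps its pending events in a circular buffer of
 * sev->elems entries, with sev->first indexing the oldest one.  sev_pos()
 * maps a logical offset from the oldest event to an index into
 * sev->events[], wrapping around at the end of the buffer.
 */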
static unsigned sev_pos(const struct v4l2_subscribed_event *sev, unsigned idx)
{
	idx += sev->first;
	return idx >= sev->elems ? idx - sev->elems : idx;
}
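/*
 * Dequeue the oldest pending event for this filehandle, if any.  Runs under
 * fh->vdev->fh_lock; returns -ENOENT when no event is available.  The number
 * of events still pending is reported back to userspace in event->pending.
 */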
static int __v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event)
{
	struct v4l2_kevent *kev;
	unsigned long flags;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	if (list_empty(&fh->available)) {
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		return -ENOENT;
	}

	WARN_ON(fh->navailable == 0);

	kev = list_first_entry(&fh->available, struct v4l2_kevent, list);
	list_del(&kev->list);
	fh->navailable--;

	kev->event.pending = fh->navailable;
	*event = kev->event;
	kev->sev->first = sev_pos(kev->sev, 1);
	kev->sev->in_use--;

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	return 0;
}
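/*
 * Backend for VIDIOC_DQEVENT: dequeue one event, sleeping until one arrives
 * unless nonblocking is set.  The wait below may sleep for a long time, so
 * the serialization lock of the video device (if any) is dropped while
 * waiting and re-taken afterwards, keeping other file operations usable in
 * the meantime.
 */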
int v4l2_event_dequeue(struct v4l2_fh *fh, struct v4l2_event *event,
		       int nonblocking)
{
	int ret;

	if (nonblocking)
		return __v4l2_event_dequeue(fh, event);

	/* Release the vdev lock while waiting */
	if (fh->vdev->lock)
		mutex_unlock(fh->vdev->lock);

	do {
		ret = wait_event_interruptible(fh->wait,
					       fh->navailable != 0);
		if (ret < 0)
			break;

		ret = __v4l2_event_dequeue(fh, event);
	} while (ret == -ENOENT);

	if (fh->vdev->lock)
		mutex_lock(fh->vdev->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_dequeue);
/* Caller must hold fh->vdev->fh_lock! */
static struct v4l2_subscribed_event *v4l2_event_subscribed(
		struct v4l2_fh *fh, u32 type, u32 id)
{
	struct v4l2_subscribed_event *sev;

	assert_spin_locked(&fh->vdev->fh_lock);

	list_for_each_entry(sev, &fh->subscribed, list)
		if (sev->type == type && sev->id == id)
			return sev;

	return NULL;
}
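/*
 * Queue an event on a single filehandle, called with fh->vdev->fh_lock
 * held.  If the subscription's buffer is full, the oldest event is dropped
 * to make room; the optional replace/merge ops let the subscriber fold the
 * payload of a dropped event into the remaining ones, so only intermediate
 * states are lost, not the information itself.
 */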
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/*
	 * If the event has been added to the fh->subscribed list, but its
	 * add op has not completed yet, elems will be 0; treat this as
	 * not being subscribed.
	 */
	if (!sev->elems)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->ops && sev->ops->replace) {
				sev->ops->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->ops && sev->ops->merge) {
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);

			sev->ops->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
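/*
 * Queue an event on every filehandle open on the device.  The timestamp is
 * taken once here so that all subscribers see the same value.  A minimal
 * driver-side sketch (the surrounding driver code is hypothetical; the
 * event type is the standard V4L2_EVENT_EOS):
 *
 *	struct v4l2_event ev = {
 *		.type = V4L2_EVENT_EOS,
 *	};
 *
 *	v4l2_event_queue(vdev, &ev);
 */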
void v4l2_event_queue(struct video_device *vdev, const struct v4l2_event *ev)
{
	struct v4l2_fh *fh;
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&vdev->fh_lock, flags);

	list_for_each_entry(fh, &vdev->fh_list, list)
		__v4l2_event_queue_fh(fh, ev, &timestamp);

	spin_unlock_irqrestore(&vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue);
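/*
 * As v4l2_event_queue(), but deliver the event only to the given
 * filehandle instead of to every open filehandle on the device.
 */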
void v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev)
{
	unsigned long flags;
	struct timespec timestamp;

	ktime_get_ts(&timestamp);

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	__v4l2_event_queue_fh(fh, ev, &timestamp);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_event_queue_fh);
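/*
 * Number of events ready for dequeuing on this filehandle.  Typically used
 * from a driver's poll() handler to signal an exceptional condition, along
 * these lines (a sketch, assuming a poll handler with the fh at hand):
 *
 *	if (v4l2_event_pending(fh))
 *		mask |= POLLPRI;
 */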
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
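/*
 * Subscribe a filehandle to an event type/id pair.  elems is the depth of
 * the per-subscription event buffer (at least one entry is always
 * allocated), and ops optionally supplies add/del/replace/merge callbacks.
 * The subscription is published on fh->subscribed before the add op runs;
 * sev->elems stays 0 until the add op has succeeded, which makes
 * __v4l2_event_queue_fh() ignore the subscription in the meantime.
 */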
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems,
			 const struct v4l2_subscribed_event_ops *ops)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	unsigned long flags;
	unsigned i;

	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;

	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->ops = ops;

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found_ev) {
		kfree(sev);
		return 0; /* Already listening */
	}

	if (sev->ops && sev->ops->add) {
		int ret = sev->ops->add(sev, elems);

		if (ret) {
			sev->ops = NULL;
			v4l2_event_unsubscribe(fh, sub);
			return ret;
		}
	}

	/* Mark as ready for use */
	sev->elems = elems;

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
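/*
 * A driver would normally call this from its vidioc_subscribe_event
 * handler, along these lines (a sketch; the handler name and the choice of
 * supported events are hypothetical):
 *
 *	static int my_subscribe_event(struct v4l2_fh *fh,
 *				      struct v4l2_event_subscription *sub)
 *	{
 *		switch (sub->type) {
 *		case V4L2_EVENT_EOS:
 *			return v4l2_event_subscribe(fh, sub, 0, NULL);
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */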
void v4l2_event_unsubscribe_all(struct v4l2_fh *fh)
{
	struct v4l2_event_subscription sub;
	struct v4l2_subscribed_event *sev;
	unsigned long flags;

	do {
		sev = NULL;

		spin_lock_irqsave(&fh->vdev->fh_lock, flags);
		if (!list_empty(&fh->subscribed)) {
			sev = list_first_entry(&fh->subscribed,
					struct v4l2_subscribed_event, list);
			sub.type = sev->type;
			sub.id = sev->id;
		}
		spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
		if (sev)
			v4l2_event_unsubscribe(fh, &sub);
	} while (sev);
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe_all);
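/*
 * Drop one subscription (or all of them for V4L2_EVENT_ALL).  Any events
 * still pending for the subscription are removed from the available list
 * before the subscription itself is freed; the del op, if any, runs after
 * the lock has been dropped.
 */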
int v4l2_event_unsubscribe(struct v4l2_fh *fh,
			   struct v4l2_event_subscription *sub)
{
	struct v4l2_subscribed_event *sev;
	unsigned long flags;
	int i;

	if (sub->type == V4L2_EVENT_ALL) {
		v4l2_event_unsubscribe_all(fh);
		return 0;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);

	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (sev != NULL) {
		/* Remove any pending events for this subscription */
		for (i = 0; i < sev->in_use; i++) {
			list_del(&sev->events[sev_pos(sev, i)].list);
			fh->navailable--;
		}
		list_del(&sev->list);
	}

	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (sev && sev->ops && sev->ops->del)
		sev->ops->del(sev);

	kfree(sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_unsubscribe);