/*
 * v4l2-event.c
 *
 * V4L2 events.
 *
 * Copyright (C) 2009--2010 Nokia Corporation.
 *
 * Contact: Sakari Ailus <sakari.ailus@maxwell.research.nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */
# include <media/v4l2-dev.h>
# include <media/v4l2-fh.h>
# include <media/v4l2-event.h>
2011-06-07 11:13:44 -03:00
# include <media/v4l2-ctrls.h>
2010-03-01 05:14:18 -03:00
# include <linux/sched.h>
# include <linux/slab.h>
2011-08-01 15:26:38 -04:00
# include <linux/export.h>
2010-03-01 05:14:18 -03:00
2011-06-13 19:24:17 -03:00
static unsigned sev_pos ( const struct v4l2_subscribed_event * sev , unsigned idx )
2010-03-01 05:14:18 -03:00
{
2011-06-13 19:24:17 -03:00
idx + = sev - > first ;
return idx > = sev - > elems ? idx - sev - > elems : idx ;
2010-03-01 05:14:18 -03:00
}
static int __v4l2_event_dequeue ( struct v4l2_fh * fh , struct v4l2_event * event )
{
struct v4l2_kevent * kev ;
unsigned long flags ;
spin_lock_irqsave ( & fh - > vdev - > fh_lock , flags ) ;
2011-06-13 17:44:42 -03:00
if ( list_empty ( & fh - > available ) ) {
2010-03-01 05:14:18 -03:00
spin_unlock_irqrestore ( & fh - > vdev - > fh_lock , flags ) ;
return - ENOENT ;
}
2011-06-13 17:44:42 -03:00
WARN_ON ( fh - > navailable = = 0 ) ;
2010-03-01 05:14:18 -03:00
2011-06-13 17:44:42 -03:00
kev = list_first_entry ( & fh - > available , struct v4l2_kevent , list ) ;
2011-06-13 19:24:17 -03:00
list_del ( & kev - > list ) ;
2011-06-13 17:44:42 -03:00
fh - > navailable - - ;
2010-03-01 05:14:18 -03:00
2011-06-13 17:44:42 -03:00
kev - > event . pending = fh - > navailable ;
2010-03-01 05:14:18 -03:00
* event = kev - > event ;
2011-06-13 19:24:17 -03:00
kev - > sev - > first = sev_pos ( kev - > sev , 1 ) ;
kev - > sev - > in_use - - ;
2010-03-01 05:14:18 -03:00
spin_unlock_irqrestore ( & fh - > vdev - > fh_lock , flags ) ;
return 0 ;
}
int v4l2_event_dequeue ( struct v4l2_fh * fh , struct v4l2_event * event ,
int nonblocking )
{
int ret ;
if ( nonblocking )
return __v4l2_event_dequeue ( fh , event ) ;
2010-09-26 08:47:38 -03:00
/* Release the vdev lock while waiting */
if ( fh - > vdev - > lock )
mutex_unlock ( fh - > vdev - > lock ) ;
2010-03-01 05:14:18 -03:00
do {
2011-06-13 17:44:42 -03:00
ret = wait_event_interruptible ( fh - > wait ,
fh - > navailable ! = 0 ) ;
2010-03-01 05:14:18 -03:00
if ( ret < 0 )
2010-09-26 08:47:38 -03:00
break ;
2010-03-01 05:14:18 -03:00
ret = __v4l2_event_dequeue ( fh , event ) ;
} while ( ret = = - ENOENT ) ;
2010-09-26 08:47:38 -03:00
if ( fh - > vdev - > lock )
mutex_lock ( fh - > vdev - > lock ) ;
2010-03-01 05:14:18 -03:00
return ret ;
}
2010-05-02 14:32:43 -03:00
EXPORT_SYMBOL_GPL ( v4l2_event_dequeue ) ;
2010-03-01 05:14:18 -03:00
2011-06-07 11:13:44 -03:00
/* Caller must hold fh->vdev->fh_lock! */
2010-03-01 05:14:18 -03:00
static struct v4l2_subscribed_event * v4l2_event_subscribed (
2011-06-07 11:13:44 -03:00
struct v4l2_fh * fh , u32 type , u32 id )
2010-03-01 05:14:18 -03:00
{
struct v4l2_subscribed_event * sev ;
2010-05-03 12:42:46 -03:00
assert_spin_locked ( & fh - > vdev - > fh_lock ) ;
2010-03-01 05:14:18 -03:00
2011-06-20 11:56:24 -03:00
list_for_each_entry ( sev , & fh - > subscribed , list )
2011-06-07 11:13:44 -03:00
if ( sev - > type = = type & & sev - > id = = id )
2010-03-01 05:14:18 -03:00
return sev ;
return NULL ;
}
2011-06-07 11:13:44 -03:00
/*
 * Queue event @ev on file handle @fh with timestamp @ts, if @fh is
 * subscribed to it.  Each subscription owns a fixed circular buffer of
 * sev->elems slots; when it is full the oldest event is dropped, with
 * optional replace/merge callbacks preserving payload information.
 *
 * Caller must hold fh->vdev->fh_lock (both callers in this file take it).
 */
static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *ev,
		const struct timespec *ts)
{
	struct v4l2_subscribed_event *sev;
	struct v4l2_kevent *kev;
	bool copy_payload = true;

	/* Are we subscribed? */
	sev = v4l2_event_subscribed(fh, ev->type, ev->id);
	if (sev == NULL)
		return;

	/* Increase event sequence number on fh. */
	fh->sequence++;

	/* Do we have any free events? */
	if (sev->in_use == sev->elems) {
		/* no, remove the oldest one */
		kev = sev->events + sev_pos(sev, 0);
		list_del(&kev->list);
		sev->in_use--;
		sev->first = sev_pos(sev, 1);
		fh->navailable--;
		if (sev->elems == 1) {
			if (sev->replace) {
				/*
				 * Single-slot buffer: fold the old payload
				 * into the new event instead of losing it;
				 * the callback fills kev->event.u itself.
				 */
				sev->replace(&kev->event, ev);
				copy_payload = false;
			}
		} else if (sev->merge) {
			/*
			 * Multi-slot buffer: merge the dropped event's
			 * payload into what is now the oldest event.
			 */
			struct v4l2_kevent *second_oldest =
				sev->events + sev_pos(sev, 0);
			sev->merge(&kev->event, &second_oldest->event);
		}
	}

	/* Take one and fill it. */
	kev = sev->events + sev_pos(sev, sev->in_use);
	kev->event.type = ev->type;
	if (copy_payload)
		kev->event.u = ev->u;
	kev->event.id = ev->id;
	kev->event.timestamp = *ts;
	kev->event.sequence = fh->sequence;
	sev->in_use++;
	list_add_tail(&kev->list, &fh->available);

	fh->navailable++;

	wake_up_all(&fh->wait);
}
2010-03-01 05:14:18 -03:00
void v4l2_event_queue ( struct video_device * vdev , const struct v4l2_event * ev )
{
struct v4l2_fh * fh ;
unsigned long flags ;
struct timespec timestamp ;
ktime_get_ts ( & timestamp ) ;
spin_lock_irqsave ( & vdev - > fh_lock , flags ) ;
2011-06-20 11:56:24 -03:00
list_for_each_entry ( fh , & vdev - > fh_list , list )
2011-06-07 11:13:44 -03:00
__v4l2_event_queue_fh ( fh , ev , & timestamp ) ;
2010-03-01 05:14:18 -03:00
spin_unlock_irqrestore ( & vdev - > fh_lock , flags ) ;
}
EXPORT_SYMBOL_GPL ( v4l2_event_queue ) ;
2011-06-07 11:13:44 -03:00
void v4l2_event_queue_fh ( struct v4l2_fh * fh , const struct v4l2_event * ev )
{
unsigned long flags ;
struct timespec timestamp ;
ktime_get_ts ( & timestamp ) ;
spin_lock_irqsave ( & fh - > vdev - > fh_lock , flags ) ;
__v4l2_event_queue_fh ( fh , ev , & timestamp ) ;
spin_unlock_irqrestore ( & fh - > vdev - > fh_lock , flags ) ;
}
EXPORT_SYMBOL_GPL ( v4l2_event_queue_fh ) ;
2010-03-01 05:14:18 -03:00
/* Return the number of events queued on @fh (used for poll() support). */
int v4l2_event_pending(struct v4l2_fh *fh)
{
	return fh->navailable;
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
2011-06-18 07:02:20 -03:00
static void ctrls_replace ( struct v4l2_event * old , const struct v4l2_event * new )
{
u32 old_changes = old - > u . ctrl . changes ;
old - > u . ctrl = new - > u . ctrl ;
old - > u . ctrl . changes | = old_changes ;
}
/*
 * Merge callback for control events: when the oldest event is dropped,
 * fold its change flags into the next-oldest so the information the
 * reader eventually sees still covers every change that occurred.
 */
static void ctrls_merge(const struct v4l2_event *old, struct v4l2_event *new)
{
	new->u.ctrl.changes |= old->u.ctrl.changes;
}
2010-03-01 05:14:18 -03:00
/*
 * Subscribe @fh to the event described by @sub, allocating a circular
 * buffer of @elems pending-event slots (minimum 1).  Subscribing to an
 * already-subscribed event is a silent no-op.
 *
 * Returns 0 on success, -EINVAL for V4L2_EVENT_ALL or an unknown
 * control id, -ENOMEM on allocation failure.
 */
int v4l2_event_subscribe(struct v4l2_fh *fh,
			 struct v4l2_event_subscription *sub, unsigned elems)
{
	struct v4l2_subscribed_event *sev, *found_ev;
	struct v4l2_ctrl *ctrl = NULL;
	unsigned long flags;
	unsigned i;

	/* V4L2_EVENT_ALL is only valid for unsubscribe. */
	if (sub->type == V4L2_EVENT_ALL)
		return -EINVAL;

	if (elems < 1)
		elems = 1;
	if (sub->type == V4L2_EVENT_CTRL) {
		ctrl = v4l2_ctrl_find(fh->ctrl_handler, sub->id);
		if (ctrl == NULL)
			return -EINVAL;
	}

	/* One allocation covers the subscription plus its event slots. */
	sev = kzalloc(sizeof(*sev) + sizeof(struct v4l2_kevent) * elems, GFP_KERNEL);
	if (!sev)
		return -ENOMEM;
	/* Back-pointers let dequeue find the owning subscription. */
	for (i = 0; i < elems; i++)
		sev->events[i].sev = sev;
	sev->type = sub->type;
	sev->id = sub->id;
	sev->flags = sub->flags;
	sev->fh = fh;
	sev->elems = elems;
	if (ctrl) {
		sev->replace = ctrls_replace;
		sev->merge = ctrls_merge;
	}

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
	if (!found_ev)
		list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	/* v4l2_ctrl_add_event uses a mutex, so do this outside the spin lock */
	if (found_ev)
		kfree(sev);	/* already subscribed: drop the duplicate */
	else if (ctrl)
		v4l2_ctrl_add_event(ctrl, sev);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
2011-06-13 19:24:17 -03:00
void v4l2_event_unsubscribe_all ( struct v4l2_fh * fh )
2010-03-01 05:14:18 -03:00
{
2011-06-07 11:13:44 -03:00
struct v4l2_event_subscription sub ;
2010-03-01 05:14:18 -03:00
struct v4l2_subscribed_event * sev ;
unsigned long flags ;
do {
sev = NULL ;
spin_lock_irqsave ( & fh - > vdev - > fh_lock , flags ) ;
2011-06-13 17:44:42 -03:00
if ( ! list_empty ( & fh - > subscribed ) ) {
sev = list_first_entry ( & fh - > subscribed ,
2011-06-07 11:13:44 -03:00
struct v4l2_subscribed_event , list ) ;
sub . type = sev - > type ;
sub . id = sev - > id ;
2010-03-01 05:14:18 -03:00
}
spin_unlock_irqrestore ( & fh - > vdev - > fh_lock , flags ) ;
2011-06-07 11:13:44 -03:00
if ( sev )
v4l2_event_unsubscribe ( fh , & sub ) ;
2010-03-01 05:14:18 -03:00
} while ( sev ) ;
}
2011-06-13 19:24:17 -03:00
EXPORT_SYMBOL_GPL ( v4l2_event_unsubscribe_all ) ;
2010-03-01 05:14:18 -03:00
int v4l2_event_unsubscribe ( struct v4l2_fh * fh ,
struct v4l2_event_subscription * sub )
{
struct v4l2_subscribed_event * sev ;
unsigned long flags ;
2011-10-26 05:40:27 -03:00
int i ;
2010-03-01 05:14:18 -03:00
if ( sub - > type = = V4L2_EVENT_ALL ) {
v4l2_event_unsubscribe_all ( fh ) ;
return 0 ;
}
spin_lock_irqsave ( & fh - > vdev - > fh_lock , flags ) ;
2011-06-07 11:13:44 -03:00
sev = v4l2_event_subscribed ( fh , sub - > type , sub - > id ) ;
2011-06-13 18:55:58 -03:00
if ( sev ! = NULL ) {
2011-10-26 05:40:27 -03:00
/* Remove any pending events for this subscription */
for ( i = 0 ; i < sev - > in_use ; i + + ) {
list_del ( & sev - > events [ sev_pos ( sev , i ) ] . list ) ;
fh - > navailable - - ;
}
2010-03-01 05:14:18 -03:00
list_del ( & sev - > list ) ;
2011-06-13 18:55:58 -03:00
}
2010-03-01 05:14:18 -03:00
spin_unlock_irqrestore ( & fh - > vdev - > fh_lock , flags ) ;
2011-06-13 18:55:58 -03:00
if ( sev & & sev - > type = = V4L2_EVENT_CTRL ) {
2011-06-07 11:13:44 -03:00
struct v4l2_ctrl * ctrl = v4l2_ctrl_find ( fh - > ctrl_handler , sev - > id ) ;
if ( ctrl )
2011-06-13 18:55:58 -03:00
v4l2_ctrl_del_event ( ctrl , sev ) ;
2011-06-07 11:13:44 -03:00
}
2010-03-01 05:14:18 -03:00
kfree ( sev ) ;
return 0 ;
}
EXPORT_SYMBOL_GPL ( v4l2_event_unsubscribe ) ;