// SPDX-License-Identifier: GPL-2.0-only
/* Industrial I/O event handling
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>

/**
 * struct iio_event_interface - chrdev interface for an event line
 * @wait:		wait queue to allow blocking reads of events
 * @det_events:		list of detected events
 * @dev_attr_list:	list of event interface sysfs attributes
 * @flags:		file operations related flags including busy flag.
 * @group:		event interface sysfs attribute group
 * @read_lock:		lock to protect kfifo read operations
 * @ioctl_handler:	handler for event ioctl() calls
 */
struct iio_event_interface {
	wait_queue_head_t wait;
	DECLARE_KFIFO(det_events, struct iio_event_data, 16);

	struct list_head dev_attr_list;
	unsigned long flags;
	struct attribute_group group;
	struct mutex read_lock;
	struct iio_ioctl_handler ioctl_handler;
};

bool iio_event_enabled(const struct iio_event_interface *ev_int)
{
	return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
}

/**
 * iio_push_event() - try to add event to the list for userspace reading
 * @indio_dev:		IIO device structure
 * @ev_code:		What event
 * @timestamp:		When the event occurred
 *
 * Note: The caller must make sure that this function is not running
 * concurrently for the same indio_dev more than once.
 *
 * This function may be safely used as soon as a valid reference to iio_dev has
 * been obtained via iio_device_alloc(), but any events that are submitted
 * before iio_device_register() has successfully completed will be silently
 * discarded.
 **/
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	struct iio_event_data ev;
	int copied;

	if (!ev_int)
		return 0;

	/* Does anyone care? */
	if (iio_event_enabled(ev_int)) {

		ev.id = ev_code;
		ev.timestamp = timestamp;

		copied = kfifo_put(&ev_int->det_events, ev);
		if (copied != 0)
			wake_up_poll(&ev_int->wait, EPOLLIN);
	}

	return 0;
}
EXPORT_SYMBOL(iio_push_event);
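
/*
 * Illustrative sketch (not part of this file): a driver's threshold
 * interrupt handler might push an event roughly like this. The handler
 * name and channel details are hypothetical; the macros are from
 * <linux/iio/events.h>.
 *
 *	static irqreturn_t foo_thresh_irq(int irq, void *private)
 *	{
 *		struct iio_dev *indio_dev = private;
 *
 *		iio_push_event(indio_dev,
 *			       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 0,
 *						    IIO_EV_TYPE_THRESH,
 *						    IIO_EV_DIR_RISING),
 *			       iio_get_time_ns(indio_dev));
 *		return IRQ_HANDLED;
 *	}
 */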

/**
 * iio_event_poll() - poll the event queue to find out if it has data
 * @filep:	File structure pointer to identify the device
 * @wait:	Poll table pointer to add the wait queue on
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or a negative error code on failure
 */
static __poll_t iio_event_poll(struct file *filep,
			       struct poll_table_struct *wait)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	__poll_t events = 0;

	if (!indio_dev->info)
		return events;

	poll_wait(filep, &ev_int->wait, wait);

	if (!kfifo_is_empty(&ev_int->det_events))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t iio_event_chrdev_read(struct file *filep,
				     char __user *buf,
				     size_t count,
				     loff_t *f_ps)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	unsigned int copied;
	int ret;

	if (!indio_dev->info)
		return -ENODEV;

	if (count < sizeof(struct iio_event_data))
		return -EINVAL;

	do {
		if (kfifo_is_empty(&ev_int->det_events)) {
			if (filep->f_flags & O_NONBLOCK)
				return -EAGAIN;

			ret = wait_event_interruptible(ev_int->wait,
					!kfifo_is_empty(&ev_int->det_events) ||
					indio_dev->info == NULL);
			if (ret)
				return ret;
			if (indio_dev->info == NULL)
				return -ENODEV;
		}

		if (mutex_lock_interruptible(&ev_int->read_lock))
			return -ERESTARTSYS;
		ret = kfifo_to_user(&ev_int->det_events, buf, count, &copied);
		mutex_unlock(&ev_int->read_lock);

		if (ret)
			return ret;

		/*
		 * If we couldn't read anything from the fifo (a different
		 * thread might have been faster) we return -EAGAIN if the
		 * file descriptor is non-blocking, otherwise we go back to
		 * sleep and wait for more data to arrive.
		 */
		if (copied == 0 && (filep->f_flags & O_NONBLOCK))
			return -EAGAIN;

	} while (copied == 0);

	return copied;
}

static int iio_event_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev *indio_dev = filep->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);

	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_event_chrdev_fileops = {
	.read = iio_event_chrdev_read,
	.poll = iio_event_poll,
	.release = iio_event_chrdev_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static int iio_event_getfd(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int fd;

	if (ev_int == NULL)
		return -ENODEV;

	fd = mutex_lock_interruptible(&indio_dev->mlock);
	if (fd)
		return fd;

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
		fd = -EBUSY;
		goto unlock;
	}

	iio_device_get(indio_dev);

	fd = anon_inode_getfd("iio:event", &iio_event_chrdev_fileops,
			      indio_dev, O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		clear_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
		iio_device_put(indio_dev);
	} else {
		kfifo_reset_out(&ev_int->det_events);
	}

unlock:
	mutex_unlock(&indio_dev->mlock);
	return fd;
}

static const char * const iio_ev_type_text[] = {
	[IIO_EV_TYPE_THRESH] = "thresh",
	[IIO_EV_TYPE_MAG] = "mag",
	[IIO_EV_TYPE_ROC] = "roc",
	[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
	[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
	[IIO_EV_TYPE_CHANGE] = "change",
};

static const char * const iio_ev_dir_text[] = {
	[IIO_EV_DIR_EITHER] = "either",
	[IIO_EV_DIR_RISING] = "rising",
	[IIO_EV_DIR_FALLING] = "falling"
};

static const char * const iio_ev_info_text[] = {
	[IIO_EV_INFO_ENABLE] = "en",
	[IIO_EV_INFO_VALUE] = "value",
	[IIO_EV_INFO_HYSTERESIS] = "hysteresis",
	[IIO_EV_INFO_PERIOD] = "period",
	[IIO_EV_INFO_HIGH_PASS_FILTER_3DB] = "high_pass_filter_3db",
	[IIO_EV_INFO_LOW_PASS_FILTER_3DB] = "low_pass_filter_3db",
};
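
/*
 * For event attributes, attr->address packs the event spec index into the
 * lower 16 bits and the iio_event_info value into the upper 16 bits (see
 * the (i << 16) | spec_index encoding in iio_device_add_event() below).
 * The helpers that follow unpack those fields.
 */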
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].dir;
}

static enum iio_event_type iio_ev_attr_type(struct iio_dev_attr *attr)
{
	return attr->c->event_spec[attr->address & 0xffff].type;
}

static enum iio_event_info iio_ev_attr_info(struct iio_dev_attr *attr)
{
	return (attr->address >> 16) & 0xffff;
}

static ssize_t iio_ev_state_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	bool val;

	ret = strtobool(buf, &val);
	if (ret < 0)
		return ret;

	ret = indio_dev->info->write_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), val);

	return (ret < 0) ? ret : len;
}

static ssize_t iio_ev_state_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val;

	val = indio_dev->info->read_event_config(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr));
	if (val < 0)
		return val;
	else
		return sprintf(buf, "%d\n", val);
}

static ssize_t iio_ev_value_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2, val_arr[2];
	int ret;

	ret = indio_dev->info->read_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		&val, &val2);
	if (ret < 0)
		return ret;

	val_arr[0] = val;
	val_arr[1] = val2;
	return iio_format_value(buf, ret, 2, val_arr);
}

static ssize_t iio_ev_value_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int val, val2;
	int ret;

	if (!indio_dev->info->write_event_value)
		return -EINVAL;

	ret = iio_str_to_fixpoint(buf, 100000, &val, &val2);
	if (ret)
		return ret;

	ret = indio_dev->info->write_event_value(indio_dev,
		this_attr->c, iio_ev_attr_type(this_attr),
		iio_ev_attr_dir(this_attr), iio_ev_attr_info(this_attr),
		val, val2);
	if (ret < 0)
		return ret;

	return len;
}

static int iio_device_add_event(struct iio_dev *indio_dev,
	const struct iio_chan_spec *chan, unsigned int spec_index,
	enum iio_event_type type, enum iio_event_direction dir,
	enum iio_shared_by shared_by, const unsigned long *mask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	ssize_t (*show)(struct device *, struct device_attribute *, char *);
	ssize_t (*store)(struct device *, struct device_attribute *,
		const char *, size_t);
	unsigned int attrcount = 0;
	unsigned int i;
	char *postfix;
	int ret;

	for_each_set_bit(i, mask, sizeof(*mask)*8) {
		if (i >= ARRAY_SIZE(iio_ev_info_text))
			return -EINVAL;
		if (dir != IIO_EV_DIR_NONE)
			postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
					iio_ev_type_text[type],
					iio_ev_dir_text[dir],
					iio_ev_info_text[i]);
		else
			postfix = kasprintf(GFP_KERNEL, "%s_%s",
					iio_ev_type_text[type],
					iio_ev_info_text[i]);
		if (postfix == NULL)
			return -ENOMEM;

		if (i == IIO_EV_INFO_ENABLE) {
			show = iio_ev_state_show;
			store = iio_ev_state_store;
		} else {
			show = iio_ev_value_show;
			store = iio_ev_value_store;
		}

		ret = __iio_add_chan_devattr(postfix, chan, show, store,
			 (i << 16) | spec_index, shared_by, &indio_dev->dev,
			&iio_dev_opaque->event_interface->dev_attr_list);
		kfree(postfix);

		/*
		 * When an event spec is shared by multiple channels (e.g.
		 * via mask_shared_by_type), the attribute may already have
		 * been created for an earlier channel; -EBUSY is harmless
		 * for shared attributes, so skip it rather than fail.
		 */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;

		if (ret)
			return ret;

		attrcount++;
	}

	return attrcount;
}
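
/*
 * Illustration (not from this file): for a separate rising threshold event
 * on a voltage channel, the postfixes built above ("thresh_rising_en",
 * "thresh_rising_value") end up as sysfs attributes such as
 * events/in_voltage0_thresh_rising_en, with the channel prefix added by
 * __iio_add_chan_devattr().
 */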

static int iio_device_add_event_sysfs(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan)
{
	int ret = 0, i, attrcount = 0;
	enum iio_event_direction dir;
	enum iio_event_type type;

	for (i = 0; i < chan->num_event_specs; i++) {
		type = chan->event_spec[i].type;
		dir = chan->event_spec[i].dir;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SEPARATE, &chan->event_spec[i].mask_separate);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_TYPE,
			&chan->event_spec[i].mask_shared_by_type);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_DIR,
			&chan->event_spec[i].mask_shared_by_dir);
		if (ret < 0)
			return ret;
		attrcount += ret;

		ret = iio_device_add_event(indio_dev, chan, i, type, dir,
			IIO_SHARED_BY_ALL,
			&chan->event_spec[i].mask_shared_by_all);
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	ret = attrcount;
	return ret;
}

static inline int __iio_add_event_config_attrs(struct iio_dev *indio_dev)
{
	int j, ret, attrcount = 0;

	/* Dynamically created from the channels array */
	for (j = 0; j < indio_dev->num_channels; j++) {
		ret = iio_device_add_event_sysfs(indio_dev,
						 &indio_dev->channels[j]);
		/*
		 * No cleanup is needed here on failure: the sole caller,
		 * iio_device_register_eventset(), frees the partially
		 * built attribute list itself, so freeing it here as well
		 * would be a double free.
		 */
		if (ret < 0)
			return ret;
		attrcount += ret;
	}
	return attrcount;
}

static bool iio_check_for_dynamic_events(struct iio_dev *indio_dev)
{
	int j;

	for (j = 0; j < indio_dev->num_channels; j++) {
		if (indio_dev->channels[j].num_event_specs != 0)
			return true;
	}
	return false;
}

static void iio_setup_ev_int(struct iio_event_interface *ev_int)
{
	INIT_KFIFO(ev_int->det_events);
	init_waitqueue_head(&ev_int->wait);
	mutex_init(&ev_int->read_lock);
}

static long iio_event_ioctl(struct iio_dev *indio_dev, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	int __user *ip = (int __user *)arg;
	int fd;

	if (cmd == IIO_GET_EVENT_FD_IOCTL) {
		fd = iio_event_getfd(indio_dev);
		if (fd < 0)
			return fd;
		if (copy_to_user(ip, &fd, sizeof(fd)))
			return -EFAULT;
		return 0;
	}
	return IIO_IOCTL_UNHANDLED;
}
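
/*
 * Userspace sketch (hedged, not part of this file): fetching the event fd
 * and reading one event. dev_fd is assumed to be an open descriptor for
 * /dev/iio:deviceX; error handling is omitted. IIO_GET_EVENT_FD_IOCTL and
 * struct iio_event_data come from the uapi header <linux/iio/events.h>.
 *
 *	int event_fd;
 *	struct iio_event_data ev;
 *
 *	ioctl(dev_fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
 *	read(event_fd, &ev, sizeof(ev));
 *	// ev.id encodes the event; ev.timestamp is in nanoseconds
 */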

static const char *iio_event_group_name = "events";
int iio_device_register_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int;
	struct iio_dev_attr *p;
	int ret = 0, attrcount_orig = 0, attrcount, attrn;
	struct attribute **attr;

	if (!(indio_dev->info->event_attrs ||
	      iio_check_for_dynamic_events(indio_dev)))
		return 0;

	ev_int = kzalloc(sizeof(struct iio_event_interface), GFP_KERNEL);
	if (ev_int == NULL)
		return -ENOMEM;

	iio_dev_opaque->event_interface = ev_int;

	INIT_LIST_HEAD(&ev_int->dev_attr_list);

	iio_setup_ev_int(ev_int);
	if (indio_dev->info->event_attrs != NULL) {
		attr = indio_dev->info->event_attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	if (indio_dev->channels) {
		ret = __iio_add_event_config_attrs(indio_dev);
		if (ret < 0)
			goto error_free_setup_event_lines;
		attrcount += ret;
	}

	ev_int->group.name = iio_event_group_name;
	ev_int->group.attrs = kcalloc(attrcount + 1,
				      sizeof(ev_int->group.attrs[0]),
				      GFP_KERNEL);
	if (ev_int->group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_free_setup_event_lines;
	}
	if (indio_dev->info->event_attrs)
		memcpy(ev_int->group.attrs,
		       indio_dev->info->event_attrs->attrs,
		       sizeof(ev_int->group.attrs[0]) * attrcount_orig);
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &ev_int->dev_attr_list, l)
		ev_int->group.attrs[attrn++] = &p->dev_attr.attr;
	indio_dev->groups[indio_dev->groupcounter++] = &ev_int->group;

	ev_int->ioctl_handler.ioctl = iio_event_ioctl;
	iio_device_ioctl_handler_register(&iio_dev_opaque->indio_dev,
					  &ev_int->ioctl_handler);

	return 0;

error_free_setup_event_lines:
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
	return ret;
}

/**
 * iio_device_wakeup_eventset - Wakes up the event waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll() and blocking read().
 * Should usually be called when the device is unregistered.
 */
void iio_device_wakeup_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (iio_dev_opaque->event_interface == NULL)
		return;
	wake_up(&iio_dev_opaque->event_interface->wait);
}

void iio_device_unregister_eventset(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;

	if (ev_int == NULL)
		return;

	iio_device_ioctl_handler_unregister(&ev_int->ioctl_handler);
	iio_free_chan_devattr_list(&ev_int->dev_attr_list);
	kfree(ev_int->group.attrs);
	kfree(ev_int);
	iio_dev_opaque->event_interface = NULL;
}