// SPDX-License-Identifier: GPL-2.0-only
/**
 * Copyright (c) 2011 Jonathan Cameron
 *
 * Buffer handling elements of industrial I/O reference driver.
 * Uses the kfifo buffer.
 *
 * To test without hardware use the sysfs trigger.
 */
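
/*
 * Rough outline of testing with the sysfs trigger (an aside for readers;
 * the iio-trig-sysfs module must be loaded and the device/trigger numbers
 * will vary on a given system):
 *
 *   1) echo 0 > /sys/bus/iio/devices/iio_sysfs_trigger/add_trigger
 *   2) enable one or more scan elements under the device's scan_elements/
 *   3) echo sysfstrig0 > /sys/bus/iio/devices/iio:deviceX/trigger/current_trigger
 *   4) echo 1 > /sys/bus/iio/devices/iio:deviceX/buffer/enable
 *   5) each write of 1 to the trigger's trigger_now attribute then fires
 *      iio_simple_dummy_trigger_h() below and pushes one scan into the kfifo.
 */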

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitmap.h>

#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include "iio_simple_dummy.h"

/* Some fake data */
static const s16 fakedata[] = {
	[DUMMY_INDEX_VOLTAGE_0] = 7,
	[DUMMY_INDEX_DIFFVOLTAGE_1M2] = -33,
	[DUMMY_INDEX_DIFFVOLTAGE_3M4] = -2,
	[DUMMY_INDEX_ACCELX] = 344,
};

/**
 * iio_simple_dummy_trigger_h() - the trigger handler function
 * @irq: the interrupt number
 * @p: private data - always a pointer to the poll func.
 *
 * This is the guts of buffered capture. On a trigger event occurring,
 * if the pollfunc is attached then this handler is called as a threaded
 * interrupt (and hence may sleep). It is responsible for grabbing data
 * from the device and pushing it into the associated buffer.
 */
static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	int len = 0;
	u16 *data;

	data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
	if (!data)
		goto done;

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
		/*
		 * Three common options here:
		 * hardware scans: certain combinations of channels make
		 * up a fast read. The capture will consist of all of them.
		 * Hence we just call the grab data function and fill the
		 * buffer without processing.
		 * software scans: can be considered to be random access
		 * so efficient reading is just a case of minimal bus
		 * transactions.
		 * software culled hardware scans:
		 * occasionally a driver may process the nearest hardware
		 * scan to avoid storing elements that are not desired. This
		 * is the fiddliest option by far.
		 * Here let's pretend we have random access. And the values are
		 * in the constant table fakedata.
		 * (A sketch of what the hardware-scan variant might look like
		 * follows the loop below.)
		 */
		int i, j;

		for (i = 0, j = 0;
		     i < bitmap_weight(indio_dev->active_scan_mask,
				       indio_dev->masklength);
		     i++, j++) {
			j = find_next_bit(indio_dev->active_scan_mask,
					  indio_dev->masklength, j);
			/* random access read from the 'device' */
			data[i] = fakedata[j];
			len += 2;
		}
	}
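
	/*
	 * For the "hardware scan" case described above, a real driver would
	 * typically replace the per-channel loop with one bulk read of the
	 * whole scan. A rough sketch only, assuming a hypothetical
	 * regmap-backed state struct 'st', an 'int ret', and a block of
	 * 16-bit data registers starting at DUMMY_REG_DATA (none of which
	 * exist in this dummy driver):
	 *
	 *	ret = regmap_bulk_read(st->regmap, DUMMY_REG_DATA, data,
	 *			       indio_dev->scan_bytes / sizeof(*data));
	 *	if (ret)
	 *		goto done;
	 */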

	iio_push_to_buffers_with_timestamp(indio_dev, data,
					   iio_get_time_ns(indio_dev));

	kfree(data);

done:
	/*
	 * Tell the core we are done with this trigger and ready for the
	 * next one.
	 */
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops iio_simple_dummy_buffer_setup_ops = {
	/*
	 * iio_triggered_buffer_postenable:
	 * Generic function that simply attaches the pollfunc to the trigger.
	 * Replace this to mess with hardware state before we attach the
	 * trigger.
	 */
	.postenable = &iio_triggered_buffer_postenable,
	/*
	 * iio_triggered_buffer_predisable:
	 * Generic function that simply detaches the pollfunc from the trigger.
	 * Replace this to put hardware state back again after the trigger is
	 * detached but before userspace knows we have disabled the ring.
	 */
	.predisable = &iio_triggered_buffer_predisable,
};
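
/*
 * As noted above, the generic helpers can be replaced when hardware state
 * needs touching around buffer enable. A sketch only, assuming a
 * hypothetical iio_dummy_hw_start() helper that is not part of this driver:
 *
 *	static int iio_simple_dummy_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		int ret = iio_dummy_hw_start(indio_dev);
 *
 *		if (ret)
 *			return ret;
 *
 *		return iio_triggered_buffer_postenable(indio_dev);
 *	}
 */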

int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *buffer;

	/* Allocate a buffer to use - here a kfifo */
	buffer = iio_kfifo_allocate();
	if (!buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}

	iio_device_attach_buffer(indio_dev, buffer);

	/*
	 * Tell the core what device type specific functions should
	 * be run on either side of buffer capture enable/disable.
	 */
	indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;

	/*
	 * Configure a polling function.
	 * When a trigger event with this polling function connected
	 * occurs, this function is run. Typically this grabs data
	 * from the device.
	 *
	 * NULL for the top half. This is normally implemented only if we
	 * either want to ping a capture now pin (no sleeping) or grab
	 * a timestamp as close as possible to a data ready trigger firing.
	 *
	 * IRQF_ONESHOT ensures irqs are masked such that only one instance
	 * of the handler can run at a time.
	 *
	 * "iio_simple_dummy_consumer%d" formatting string for the irq 'name'
	 * as seen under /proc/interrupts. Remaining parameters as per printk.
	 */
	indio_dev->pollfunc = iio_alloc_pollfunc(NULL,
						 &iio_simple_dummy_trigger_h,
						 IRQF_ONESHOT,
						 indio_dev,
						 "iio_simple_dummy_consumer%d",
						 indio_dev->id);
	if (!indio_dev->pollfunc) {
		ret = -ENOMEM;
		goto error_free_buffer;
	}
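
	/*
	 * Aside: if the timestamp had to be captured as close as possible to
	 * the trigger firing, the core helper iio_pollfunc_store_time() could
	 * be passed as the top half above instead of NULL; it records
	 * pf->timestamp in hard interrupt context for the threaded handler to
	 * use.
	 */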

	/*
	 * Notify the core that this device is capable of buffered capture
	 * driven by a trigger.
	 */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;

	return 0;

error_free_buffer:
	iio_kfifo_free(indio_dev->buffer);
error_ret:
	return ret;
}

/**
 * iio_simple_dummy_unconfigure_buffer() - release buffer resources
 * @indio_dev: device instance state
 */
void iio_simple_dummy_unconfigure_buffer(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_kfifo_free(indio_dev->buffer);
}