/* rc-ir-raw.c - handle IR pulse/space events
 *
 * Copyright (C) 2010 by Mauro Carvalho Chehab
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
static DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (raw->dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(raw->dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	IR_dprintk(2, "sample: (%05dus %s)\n",
		   TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
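
/*
 * Example (illustrative sketch only, not part of rc-core): a hypothetical
 * driver whose hardware reports complete mark/space durations could feed
 * each sample to the decoders like this.  The foo_* names are assumptions;
 * durations are expressed in nanoseconds in this version of the API.
 *
 *	static void foo_push_sample(struct rc_dev *rcdev, bool pulse, u32 ns)
 *	{
 *		DEFINE_IR_RAW_EVENT(ev);
 *
 *		ev.pulse = pulse;
 *		ev.duration = ns;
 *		ir_raw_event_store(rcdev, &ev);
 *	}
 */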

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @type:	the type of the event that has occurred
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
{
	ktime_t now;
	s64 delta; /* ns */
	DEFINE_IR_RAW_EVENT(ev);
	int rc = 0;
	int delay;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);

	/* Check for a long duration since the last event or for being
	 * called for the first time; note that delta can't possibly be
	 * negative.
	 */
	if (delta > delay || !dev->raw->last_type)
		type |= IR_START_EVENT;
	else
		ev.duration = delta;

	if (type & IR_START_EVENT)
		ir_raw_event_reset(dev);
	else if (dev->raw->last_type & IR_SPACE) {
		ev.pulse = false;
		rc = ir_raw_event_store(dev, &ev);
	} else if (dev->raw->last_type & IR_PULSE) {
		ev.pulse = true;
		rc = ir_raw_event_store(dev, &ev);
	} else
		return 0;

	dev->raw->last_event = now;
	dev->raw->last_type = type;
	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
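
/*
 * Example (hypothetical, assuming an active-high receiver): hardware that
 * only interrupts on level changes can report each edge and let rc-core
 * compute the durations itself.  foo_irq() and foo_level_is_high() are
 * assumed names, not rc-core API.
 *
 *	static irqreturn_t foo_irq(int irq, void *priv)
 *	{
 *		struct rc_dev *rcdev = priv;
 *
 *		ir_raw_event_store_edge(rcdev, foo_level_is_high(rcdev) ?
 *					IR_PULSE : IR_SPACE);
 *		ir_raw_event_handle(rcdev);
 *		return IRQ_HANDLED;
 *	}
 */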

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) works in a
 * similar manner to ir_raw_event_store_edge. It is intended for devices
 * with limited internal buffers. It automerges samples of the same type
 * and handles timeouts. Returns non-zero if the event was added, and zero
 * if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
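
/*
 * Example (hypothetical sketch): a driver draining a small hardware FIFO
 * of raw samples can push everything through the filtered store, which
 * merges consecutive samples of the same polarity and applies the idle
 * timeout.  sample_is_mark, sample_us and the US_TO_NS() conversion from
 * rc-core.h are assumptions for illustration.
 *
 *	DEFINE_IR_RAW_EVENT(ev);
 *
 *	ev.pulse = sample_is_mark;
 *	ev.duration = US_TO_NS(sample_us);
 *	ir_raw_event_store_with_filter(rcdev, &ev);
 */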

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	IR_dprintk(2, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		init_ir_raw_event(&dev->raw->this_ev);
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);
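
/*
 * Example (hypothetical): hardware that can gate its receiver while idle
 * may provide an s_idle callback, which ir_raw_event_set_idle() above
 * invokes.  foo_s_idle() and foo_hw_set_idle() are assumed names.
 *
 *	static void foo_s_idle(struct rc_dev *rcdev, bool enable)
 *	{
 *		foo_hw_set_idle(rcdev->priv, enable);
 *	}
 *	...
 *	rcdev->s_idle = foo_s_idle;
 */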

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);
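
/*
 * Example (illustrative): drivers normally call this once after queueing a
 * batch of samples, e.g. at the end of an interrupt handler or a URB
 * completion, so the decoder thread is woken once per burst rather than
 * once per sample.
 *
 *	for (i = 0; i < count; i++)
 *		ir_raw_event_store(rcdev, &samples[i]);
 *	ir_raw_event_handle(rcdev);
 */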

/* used internally by the sysfs interface */
u64
ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_type)
{
	/* the caller will update dev->enabled_protocols */
	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_register(struct rc_dev *dev)
{
	int rc;
	struct ir_raw_handler *handler;

	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	INIT_KFIFO(dev->raw->kfifo);

	dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
				       "rc%u", dev->minor);

	if (IS_ERR(dev->raw->thread)) {
		rc = PTR_ERR(dev->raw->thread);
		goto out;
	}

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_register)
			handler->raw_register(dev);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;

out:
	kfree(dev->raw);
	dev->raw = NULL;
	return rc;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister)
			handler->raw_unregister(dev);
	mutex_unlock(&ir_raw_handler_lock);

	kfree(dev->raw);
	dev->raw = NULL;
}

/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	if (ir_raw_handler->raw_register)
		list_for_each_entry(raw, &ir_raw_client_list, list)
			ir_raw_handler->raw_register(raw->dev);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);
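
/*
 * Example (hypothetical decoder skeleton, not a real protocol): a raw
 * decoder module fills in a struct ir_raw_handler and registers it; the
 * ->decode() callback is then fed one ir_raw_event at a time from
 * ir_raw_event_thread().  The foo names and the use of RC_BIT_OTHER are
 * assumptions for illustration only.
 *
 *	static int ir_foo_decode(struct rc_dev *dev, struct ir_raw_event ev)
 *	{
 *		if (!(dev->enabled_protocols & RC_BIT_OTHER))
 *			return 0;
 *		... run the foo protocol state machine on ev ...
 *		return 0;
 *	}
 *
 *	static struct ir_raw_handler foo_handler = {
 *		.protocols	= RC_BIT_OTHER,
 *		.decode		= ir_foo_decode,
 *	};
 *
 *	static int __init ir_foo_decode_init(void)
 *	{
 *		return ir_raw_handler_register(&foo_handler);
 *	}
 */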

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		ir_raw_disable_protocols(raw->dev, protocols);
		if (ir_raw_handler->raw_unregister)
			ir_raw_handler->raw_unregister(raw->dev);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);