// SPDX-License-Identifier: GPL-2.0
// rc-ir-raw.c - handle IR pulse/space events
//
// Copyright (C) 2010 by Mauro Carvalho Chehab

#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/kmod.h>
#include <linux/sched.h>
#include "rc-core-priv.h"

/* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
static LIST_HEAD(ir_raw_client_list);

/* Used to handle IR raw handler extensions */
DEFINE_MUTEX(ir_raw_handler_lock);
static LIST_HEAD(ir_raw_handler_list);
static atomic64_t available_protocols = ATOMIC64_INIT(0);

static int ir_raw_event_thread(void *data)
{
	struct ir_raw_event ev;
	struct ir_raw_handler *handler;
	struct ir_raw_event_ctrl *raw = data;
	struct rc_dev *dev = raw->dev;

	while (1) {
		mutex_lock(&ir_raw_handler_lock);
		while (kfifo_out(&raw->kfifo, &ev, 1)) {
			if (is_timing_event(ev)) {
				if (ev.duration == 0)
					dev_warn_once(&dev->dev, "nonsensical timing event of duration 0");
				if (is_timing_event(raw->prev_ev) &&
				    !is_transition(&ev, &raw->prev_ev))
					dev_warn_once(&dev->dev, "two consecutive events of type %s",
						      TO_STR(ev.pulse));
				if (raw->prev_ev.reset && ev.pulse == 0)
					dev_warn_once(&dev->dev, "timing event after reset should be pulse");
			}
			list_for_each_entry(handler, &ir_raw_handler_list, list)
				if (dev->enabled_protocols &
				    handler->protocols || !handler->protocols)
					handler->decode(dev, ev);
			ir_lirc_raw_event(dev, ev);
			raw->prev_ev = ev;
		}
		mutex_unlock(&ir_raw_handler_lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			break;
		} else if (!kfifo_is_empty(&raw->kfifo))
			set_current_state(TASK_RUNNING);

		schedule();
	}

	return 0;
}

/**
 * ir_raw_event_store() - pass a pulse/space duration to the raw ir decoders
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines. Pulses are
 * signalled as positive values and spaces as negative values. A zero value
 * will reset the decoding state machines.
 */
int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	dev_dbg(&dev->dev, "sample: (%05dus %s)\n",
		TO_US(ev->duration), TO_STR(ev->pulse));

	if (!kfifo_put(&dev->raw->kfifo, *ev)) {
		dev_err(&dev->dev, "IR event FIFO is full!\n");
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store);
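
/*
 * Example (illustrative sketch, not part of rc-core): a driver whose
 * hardware reports complete pulse/space durations could feed each sample
 * to the decoders and then kick decoding. The helper name and the idea
 * that durations arrive ready-made in nanoseconds are assumptions made
 * for this example.
 *
 *	static void example_push_sample(struct rc_dev *dev, u32 duration_ns,
 *					bool pulse)
 *	{
 *		struct ir_raw_event ev = {
 *			.duration	= duration_ns,
 *			.pulse		= pulse,
 *		};
 *
 *		ir_raw_event_store(dev, &ev);
 *		ir_raw_event_handle(dev);	// wake the decoder thread
 *	}
 */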

/**
 * ir_raw_event_store_edge() - notify raw ir decoders of the start of a pulse/space
 * @dev:	the struct rc_dev device descriptor
 * @pulse:	true for pulse, false for space
 *
 * This routine (which may be called from an interrupt context) is used to
 * store the beginning of an ir pulse or space (or the start/end of ir
 * reception) for the raw ir decoding state machines. This is used by
 * hardware which does not provide durations directly but only interrupts
 * (or similar events) on state change.
 */
int ir_raw_event_store_edge(struct rc_dev *dev, bool pulse)
{
	ktime_t now;
	struct ir_raw_event ev = {};

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();
	ev.duration = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
	/* the interval just measured was spent at the previous level */
	ev.pulse = !pulse;

	return ir_raw_event_store_with_timeout(dev, &ev);
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_edge);
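
/*
 * Example (illustrative sketch): the typical caller of
 * ir_raw_event_store_edge() is a level-change interrupt handler, in the
 * style of gpio-ir-recv. The private struct and gpiod field below are
 * assumptions made for this example.
 *
 *	static irqreturn_t example_ir_irq(int irq, void *data)
 *	{
 *		struct example_ir_priv *priv = data;
 *		int val = gpiod_get_value(priv->gpiod);
 *
 *		if (val >= 0)
 *			ir_raw_event_store_edge(priv->rcdev, val == 1);
 *
 *		return IRQ_HANDLED;
 *	}
 */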

/*
 * ir_raw_event_store_with_timeout() - pass a pulse/space duration to the raw
 *				       ir decoders, schedule decoding and
 *				       timeout
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the struct ir_raw_event descriptor of the pulse/space
 *
 * This routine (which may be called from an interrupt context) stores a
 * pulse/space duration for the raw ir decoding state machines, schedules
 * decoding and generates a timeout.
 */
int ir_raw_event_store_with_timeout(struct rc_dev *dev, struct ir_raw_event *ev)
{
	ktime_t now;
	int rc = 0;

	if (!dev->raw)
		return -EINVAL;

	now = ktime_get();

	spin_lock(&dev->raw->edge_spinlock);
	rc = ir_raw_event_store(dev, ev);

	dev->raw->last_event = now;

	/* timer could be set to timeout (125ms by default) */
	if (!timer_pending(&dev->raw->edge_handle) ||
	    time_after(dev->raw->edge_handle.expires,
		       jiffies + msecs_to_jiffies(15))) {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + msecs_to_jiffies(15));
	}
	spin_unlock(&dev->raw->edge_spinlock);

	return rc;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_timeout);

/**
 * ir_raw_event_store_with_filter() - pass next pulse/space to decoders with some processing
 * @dev:	the struct rc_dev device descriptor
 * @ev:		the event that has occurred
 *
 * This routine (which may be called from an interrupt context) works
 * in a similar manner to ir_raw_event_store_edge(). It is intended for
 * devices with a limited internal buffer. It automerges samples of the
 * same type and handles timeouts. Returns non-zero if the event was
 * added, and zero if the event was ignored due to idle processing.
 */
int ir_raw_event_store_with_filter(struct rc_dev *dev, struct ir_raw_event *ev)
{
	if (!dev->raw)
		return -EINVAL;

	/* Ignore spaces in idle mode */
	if (dev->idle && !ev->pulse)
		return 0;
	else if (dev->idle)
		ir_raw_event_set_idle(dev, false);

	if (!dev->raw->this_ev.duration)
		dev->raw->this_ev = *ev;
	else if (ev->pulse == dev->raw->this_ev.pulse)
		dev->raw->this_ev.duration += ev->duration;
	else {
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = *ev;
	}

	/* Enter idle mode if necessary */
	if (!ev->pulse && dev->timeout &&
	    dev->raw->this_ev.duration >= dev->timeout)
		ir_raw_event_set_idle(dev, true);

	return 1;
}
EXPORT_SYMBOL_GPL(ir_raw_event_store_with_filter);
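
/*
 * Example (illustrative sketch): hardware FIFOs often split one long
 * space into several chunks. Draining such a FIFO through
 * ir_raw_event_store_with_filter() merges consecutive samples of the
 * same type back into a single event and enters idle mode once
 * dev->timeout is exceeded. example_hw_read() and the sample layout
 * are hypothetical.
 *
 *	static void example_drain_fifo(struct rc_dev *dev)
 *	{
 *		struct ir_raw_event ev = {};
 *		u32 sample;
 *
 *		while (example_hw_read(dev, &sample)) {
 *			ev.pulse = sample & BIT(31);
 *			ev.duration = sample & ~BIT(31);
 *			ir_raw_event_store_with_filter(dev, &ev);
 *		}
 *		ir_raw_event_handle(dev);
 *	}
 */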

/**
 * ir_raw_event_set_idle() - provide hint to rc-core when the device is idle or not
 * @dev:	the struct rc_dev device descriptor
 * @idle:	whether the device is idle or not
 */
void ir_raw_event_set_idle(struct rc_dev *dev, bool idle)
{
	if (!dev->raw)
		return;

	dev_dbg(&dev->dev, "%s idle mode\n", idle ? "enter" : "leave");

	if (idle) {
		dev->raw->this_ev.timeout = true;
		ir_raw_event_store(dev, &dev->raw->this_ev);
		dev->raw->this_ev = (struct ir_raw_event) {};
	}

	if (dev->s_idle)
		dev->s_idle(dev, idle);

	dev->idle = idle;
}
EXPORT_SYMBOL_GPL(ir_raw_event_set_idle);

/**
 * ir_raw_event_handle() - schedules the decoding of stored ir data
 * @dev:	the struct rc_dev device descriptor
 *
 * This routine will tell rc-core to start decoding stored ir data.
 */
void ir_raw_event_handle(struct rc_dev *dev)
{
	if (!dev->raw || !dev->raw->thread)
		return;

	wake_up_process(dev->raw->thread);
}
EXPORT_SYMBOL_GPL(ir_raw_event_handle);

/* used internally by the sysfs interface */
u64 ir_raw_get_allowed_protocols(void)
{
	return atomic64_read(&available_protocols);
}

static int change_protocol(struct rc_dev *dev, u64 *rc_proto)
{
	struct ir_raw_handler *handler;
	u32 timeout = 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (!(dev->enabled_protocols & handler->protocols) &&
		    (*rc_proto & handler->protocols) && handler->raw_register)
			handler->raw_register(dev);

		if ((dev->enabled_protocols & handler->protocols) &&
		    !(*rc_proto & handler->protocols) &&
		    handler->raw_unregister)
			handler->raw_unregister(dev);
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (!dev->max_timeout)
		return 0;

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & *rc_proto) {
			if (timeout < handler->min_timeout)
				timeout = handler->min_timeout;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	if (timeout == 0)
		timeout = IR_DEFAULT_TIMEOUT;
	else
		timeout += MS_TO_NS(10);

	if (timeout < dev->min_timeout)
		timeout = dev->min_timeout;
	else if (timeout > dev->max_timeout)
		timeout = dev->max_timeout;

	if (dev->s_timeout)
		dev->s_timeout(dev, timeout);
	else
		dev->timeout = timeout;

	return 0;
}

static void ir_raw_disable_protocols(struct rc_dev *dev, u64 protocols)
{
	mutex_lock(&dev->lock);
	dev->enabled_protocols &= ~protocols;
	mutex_unlock(&dev->lock);
}

/**
 * ir_raw_gen_manchester() - Encode data with Manchester (bi-phase) modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Manchester modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using Manchester (bi-phase)
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_manchester(struct ir_raw_event **ev, unsigned int max,
			  const struct ir_raw_timings_manchester *timings,
			  unsigned int n, u64 data)
{
	bool need_pulse;
	u64 i;
	int ret = -ENOBUFS;

	i = BIT_ULL(n - 1);

	if (timings->leader_pulse) {
		if (!max--)
			return ret;
		init_ir_raw_event_duration((*ev), 1, timings->leader_pulse);
		if (timings->leader_space) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->leader_space);
		}
	} else {
		/* continue existing signal */
		--(*ev);
	}
	/* from here on *ev will point to the last event rather than the next */

	while (n && i > 0) {
		need_pulse = !(data & i);
		if (timings->invert)
			need_pulse = !need_pulse;
		if (need_pulse == !!(*ev)->pulse) {
			(*ev)->duration += timings->clock;
		} else {
			if (!max--)
				goto nobufs;
			init_ir_raw_event_duration(++(*ev), need_pulse,
						   timings->clock);
		}

		if (!max--)
			goto nobufs;
		init_ir_raw_event_duration(++(*ev), !need_pulse,
					   timings->clock);
		i >>= 1;
	}

	if (timings->trailer_space) {
		if (!(*ev)->pulse)
			(*ev)->duration += timings->trailer_space;
		else if (!max--)
			goto nobufs;
		else
			init_ir_raw_event_duration(++(*ev), 0,
						   timings->trailer_space);
	}

	ret = 0;
nobufs:
	/* point to the next event rather than last event before returning */
	++(*ev);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_manchester);
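
/*
 * Example (illustrative sketch): Manchester timings in the style of the
 * RC-5 encoder, using the nominal RC-5 half-bit period of 888888 ns.
 * The exact timings and bit count used by the real encoder live in
 * ir-rc5-decoder.c, so treat the values below as assumptions.
 *
 *	static const struct ir_raw_timings_manchester example_rc5_timings = {
 *		.leader_pulse	= 888888,
 *		.clock		= 888888,
 *		.trailer_space	= 888888 * 10,
 *	};
 *
 *	struct ir_raw_event raw[32], *e = raw;
 *	int ret = ir_raw_gen_manchester(&e, ARRAY_SIZE(raw),
 *					&example_rc5_timings, 13, 0x1abc);
 *	// on success, e - raw is the number of events written
 */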

/**
 * ir_raw_gen_pd() - Encode data to raw events with pulse-distance modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse distance modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-distance
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pd(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pd *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret;
	unsigned int space;

	if (timings->header_pulse) {
		ret = ir_raw_gen_pulse_space(ev, &max, timings->header_pulse,
					     timings->header_space);
		if (ret)
			return ret;
	}

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			space = timings->bit_space[(data >> i) & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			space = timings->bit_space[data & 1];
			ret = ir_raw_gen_pulse_space(ev, &max,
						     timings->bit_pulse,
						     space);
			if (ret)
				return ret;
		}
	}

	ret = ir_raw_gen_pulse_space(ev, &max, timings->trailer_pulse,
				     timings->trailer_space);
	return ret;
}
EXPORT_SYMBOL(ir_raw_gen_pd);
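
/*
 * Example (illustrative sketch): pulse-distance timings in the style of
 * NEC, using the nominal NEC unit of 562500 ns. The authoritative
 * timings live in ir-nec-decoder.c; the values below are assumptions
 * for illustration only.
 *
 *	static const struct ir_raw_timings_pd example_nec_timings = {
 *		.header_pulse	= 9000000,
 *		.header_space	= 4500000,
 *		.bit_pulse	= 562500,
 *		.bit_space[0]	= 562500,
 *		.bit_space[1]	= 1687500,
 *		.trailer_pulse	= 562500,
 *		.trailer_space	= 5625000,
 *		.msb_first	= 0,
 *	};
 *
 *	struct ir_raw_event raw[68], *e = raw;
 *	int ret = ir_raw_gen_pd(&e, ARRAY_SIZE(raw), &example_nec_timings,
 *				32, 0x20df10ef);
 */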

/**
 * ir_raw_gen_pl() - Encode data to raw events with pulse-length modulation.
 * @ev:		Pointer to pointer to next free event. *@ev is incremented for
 *		each raw event filled.
 * @max:	Maximum number of raw events to fill.
 * @timings:	Pulse length modulation timings.
 * @n:		Number of bits of data.
 * @data:	Data bits to encode.
 *
 * Encodes the @n least significant bits of @data using pulse-length
 * modulation with the timing characteristics described by @timings, writing up
 * to @max raw IR events using the *@ev pointer.
 *
 * Returns:	0 on success.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		full encoded data. In this case all @max events will have been
 *		written.
 */
int ir_raw_gen_pl(struct ir_raw_event **ev, unsigned int max,
		  const struct ir_raw_timings_pl *timings,
		  unsigned int n, u64 data)
{
	int i;
	int ret = -ENOBUFS;
	unsigned int pulse;

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 1, timings->header_pulse);

	if (timings->msb_first) {
		for (i = n - 1; i >= 0; --i) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[(data >> i) & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	} else {
		for (i = 0; i < n; ++i, data >>= 1) {
			if (!max--)
				return ret;
			init_ir_raw_event_duration((*ev)++, 0,
						   timings->bit_space);
			if (!max--)
				return ret;
			pulse = timings->bit_pulse[data & 1];
			init_ir_raw_event_duration((*ev)++, 1, pulse);
		}
	}

	if (!max--)
		return ret;

	init_ir_raw_event_duration((*ev)++, 0, timings->trailer_space);

	return 0;
}
EXPORT_SYMBOL(ir_raw_gen_pl);
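
/*
 * Example (illustrative sketch): pulse-length timings in the style of
 * Sony SIRC, using the nominal 600000 ns unit (header = 4 units, bit 0
 * pulse = 1 unit, bit 1 pulse = 2 units, LSB first). The authoritative
 * timings live in ir-sony-decoder.c; treat these values as assumptions.
 *
 *	static const struct ir_raw_timings_pl example_sony_timings = {
 *		.header_pulse	= 2400000,
 *		.bit_space	= 600000,
 *		.bit_pulse[0]	= 600000,
 *		.bit_pulse[1]	= 1200000,
 *		.trailer_space	= 3600000,
 *		.msb_first	= 0,
 *	};
 */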

/**
 * ir_raw_encode_scancode() - Encode a scancode as raw events
 *
 * @protocol:		protocol
 * @scancode:		scancode filter describing a single scancode
 * @events:		array of raw events to write into
 * @max:		max number of raw events
 *
 * Attempts to encode the scancode as raw events.
 *
 * Returns:	The number of events written.
 *		-ENOBUFS if there isn't enough space in the array to fit the
 *		encoding. In this case all @max events will have been written.
 *		-EINVAL if the scancode is ambiguous or invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_scancode(enum rc_proto protocol, u32 scancode,
			   struct ir_raw_event *events, unsigned int max)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = 1ULL << protocol;

	ir_raw_load_modules(&mask);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->encode(protocol, scancode, events, max);
			if (ret >= 0 || ret == -ENOBUFS)
				break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_scancode);
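
/*
 * Example (illustrative sketch): a TX path might encode a scancode into
 * raw events before programming its hardware. RC_PROTO_NEC and the
 * scancode are arbitrary choices; the "program hardware" step is left
 * out.
 *
 *	struct ir_raw_event raw[64];
 *	int count;
 *
 *	count = ir_raw_encode_scancode(RC_PROTO_NEC, 0x040c, raw,
 *				       ARRAY_SIZE(raw));
 *	if (count >= 0) {
 *		// raw[0..count - 1] now holds the pulse/space sequence
 *	}
 */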

/**
 * ir_raw_edge_handle() - Handle ir_raw_event_store_edge() processing
 *
 * @t:		timer_list
 *
 * This callback is armed by ir_raw_event_store_edge(). It does two things:
 * first of all, rather than calling ir_raw_event_handle() for each edge
 * and waking up the rc thread, ir_raw_event_handle() is called 15 ms
 * after the first edge. Secondly, a timeout event is generated if no
 * more IR is received after the rc_dev timeout.
 */
static void ir_raw_edge_handle(struct timer_list *t)
{
	struct ir_raw_event_ctrl *raw = from_timer(raw, t, edge_handle);
	struct rc_dev *dev = raw->dev;
	unsigned long flags;
	ktime_t interval;

	spin_lock_irqsave(&dev->raw->edge_spinlock, flags);
	interval = ktime_sub(ktime_get(), dev->raw->last_event);
	if (ktime_to_ns(interval) >= dev->timeout) {
		struct ir_raw_event ev = {
			.timeout = true,
			.duration = ktime_to_ns(interval)
		};

		ir_raw_event_store(dev, &ev);
	} else {
		mod_timer(&dev->raw->edge_handle,
			  jiffies + nsecs_to_jiffies(dev->timeout -
						     ktime_to_ns(interval)));
	}
	spin_unlock_irqrestore(&dev->raw->edge_spinlock, flags);

	ir_raw_event_handle(dev);
}

/**
 * ir_raw_encode_carrier() - Get carrier used for protocol
 *
 * @protocol:		protocol
 *
 * Attempts to find the carrier for the specified protocol
 *
 * Returns:	The carrier in Hz
 *		-EINVAL if the protocol is invalid, or if no
 *		compatible encoder was found.
 */
int ir_raw_encode_carrier(enum rc_proto protocol)
{
	struct ir_raw_handler *handler;
	int ret = -EINVAL;
	u64 mask = BIT_ULL(protocol);

	mutex_lock(&ir_raw_handler_lock);
	list_for_each_entry(handler, &ir_raw_handler_list, list) {
		if (handler->protocols & mask && handler->encode) {
			ret = handler->carrier;
			break;
		}
	}
	mutex_unlock(&ir_raw_handler_lock);

	return ret;
}
EXPORT_SYMBOL(ir_raw_encode_carrier);
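
/*
 * Illustrative usage (assumption: the RC-5 encoder module is loaded,
 * which advertises a 36 kHz carrier):
 *
 *	int carrier = ir_raw_encode_carrier(RC_PROTO_RC5);
 *	// carrier == 36000 on success, -EINVAL if no encoder matches
 */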

/*
 * Used to (un)register raw event clients
 */
int ir_raw_event_prepare(struct rc_dev *dev)
{
	if (!dev)
		return -EINVAL;

	dev->raw = kzalloc(sizeof(*dev->raw), GFP_KERNEL);
	if (!dev->raw)
		return -ENOMEM;

	dev->raw->dev = dev;
	dev->change_protocol = change_protocol;
	dev->idle = true;
	spin_lock_init(&dev->raw->edge_spinlock);
	timer_setup(&dev->raw->edge_handle, ir_raw_edge_handle, 0);
	INIT_KFIFO(dev->raw->kfifo);

	return 0;
}

int ir_raw_event_register(struct rc_dev *dev)
{
	struct task_struct *thread;

	thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", dev->minor);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	dev->raw->thread = thread;

	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&dev->raw->list, &ir_raw_client_list);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}

void ir_raw_event_free(struct rc_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->raw);
	dev->raw = NULL;
}

void ir_raw_event_unregister(struct rc_dev *dev)
{
	struct ir_raw_handler *handler;

	if (!dev || !dev->raw)
		return;

	kthread_stop(dev->raw->thread);
	del_timer_sync(&dev->raw->edge_handle);

	mutex_lock(&ir_raw_handler_lock);
	list_del(&dev->raw->list);
	list_for_each_entry(handler, &ir_raw_handler_list, list)
		if (handler->raw_unregister &&
		    (handler->protocols & dev->enabled_protocols))
			handler->raw_unregister(dev);

	lirc_bpf_free(dev);

	ir_raw_event_free(dev);

	/*
	 * A user can be calling bpf(BPF_PROG_{QUERY|ATTACH|DETACH}), so
	 * ensure that the raw member is null on unlock; this is how
	 * "device gone" is checked.
	 */
	mutex_unlock(&ir_raw_handler_lock);
}

/*
 * Extension interface - used to register the IR decoders
 */
int ir_raw_handler_register(struct ir_raw_handler *ir_raw_handler)
{
	mutex_lock(&ir_raw_handler_lock);
	list_add_tail(&ir_raw_handler->list, &ir_raw_handler_list);
	atomic64_or(ir_raw_handler->protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);

	return 0;
}
EXPORT_SYMBOL(ir_raw_handler_register);

void ir_raw_handler_unregister(struct ir_raw_handler *ir_raw_handler)
{
	struct ir_raw_event_ctrl *raw;
	u64 protocols = ir_raw_handler->protocols;

	mutex_lock(&ir_raw_handler_lock);
	list_del(&ir_raw_handler->list);
	list_for_each_entry(raw, &ir_raw_client_list, list) {
		if (ir_raw_handler->raw_unregister &&
		    (raw->dev->enabled_protocols & protocols))
			ir_raw_handler->raw_unregister(raw->dev);

		ir_raw_disable_protocols(raw->dev, protocols);
	}
	atomic64_andnot(protocols, &available_protocols);
	mutex_unlock(&ir_raw_handler_lock);
}
EXPORT_SYMBOL(ir_raw_handler_unregister);