// SPDX-License-Identifier: GPL-2.0

#include <linux/anon_inodes.h>
#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pinctrl/consumer.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/timekeeping.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <uapi/linux/gpio.h>

#include "gpiolib.h"
#include "gpiolib-cdev.h"
/*
 * Array sizes must ensure 64-bit alignment and not create holes in the
 * struct packing.
 */
static_assert(IS_ALIGNED(GPIO_V2_LINES_MAX, 2));
static_assert(IS_ALIGNED(GPIO_MAX_NAME_SIZE, 8));

/*
 * Check that uAPI structs are 64-bit aligned for 32/64-bit compatibility
 */
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config_attribute), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_config), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_request), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_info_changed), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_event), 8));
static_assert(IS_ALIGNED(sizeof(struct gpio_v2_line_values), 8));
/* Character device interface to GPIO.
 *
 * The GPIO character device, /dev/gpiochipN, provides userspace an
 * interface to gpiolib GPIOs via ioctl()s.
 */
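/*
 * Illustrative only (not part of the driver): a minimal sketch of how
 * userspace might exercise this interface via the v2 uAPI, assuming a
 * chip file descriptor "chip_fd" obtained by open()ing /dev/gpiochipN:
 *
 *	struct gpio_v2_line_request req = { 0 };
 *
 *	req.num_lines = 1;
 *	req.offsets[0] = 3;
 *	req.config.flags = GPIO_V2_LINE_FLAG_INPUT;
 *	strcpy(req.consumer, "example");
 *	ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
 *
 *	struct gpio_v2_line_values vals = { .mask = 1 };
 *
 *	ioctl(req.fd, GPIO_V2_LINE_GET_VALUES_IOCTL, &vals);
 *	// bit 0 of vals.bits now holds the logical value of the line
 */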
/*
 * GPIO line handle management
 */

#ifdef CONFIG_GPIO_CDEV_V1

/**
 * struct linehandle_state - contains the state of a userspace handle
 * @gdev: the GPIO device the handle pertains to
 * @label: consumer label used to tag descriptors
 * @descs: the GPIO descriptors held by this handle
 * @num_descs: the number of descriptors held in the descs array
 */
struct linehandle_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *descs[GPIOHANDLES_MAX];
	u32 num_descs;
};

#define GPIOHANDLE_REQUEST_VALID_FLAGS \
	(GPIOHANDLE_REQUEST_INPUT | \
	GPIOHANDLE_REQUEST_OUTPUT | \
	GPIOHANDLE_REQUEST_ACTIVE_LOW | \
	GPIOHANDLE_REQUEST_BIAS_PULL_UP | \
	GPIOHANDLE_REQUEST_BIAS_PULL_DOWN | \
	GPIOHANDLE_REQUEST_BIAS_DISABLE | \
	GPIOHANDLE_REQUEST_OPEN_DRAIN | \
	GPIOHANDLE_REQUEST_OPEN_SOURCE)

static int linehandle_validate_flags(u32 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIOHANDLE_REQUEST_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT & OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIOHANDLE_REQUEST_INPUT) &&
	    (flags & GPIOHANDLE_REQUEST_OUTPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
	 * the hardware actually supports enabling both at the same time the
	 * electrical result would be disastrous.
	 */
	if ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) &&
	    (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* OPEN_DRAIN and OPEN_SOURCE flags only make sense for output mode. */
	if (!(flags & GPIOHANDLE_REQUEST_OUTPUT) &&
	    ((flags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	     (flags & GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -EINVAL;

	/* Bias flags only allowed for input or output mode. */
	if (!((flags & GPIOHANDLE_REQUEST_INPUT) ||
	      (flags & GPIOHANDLE_REQUEST_OUTPUT)) &&
	    ((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP) ||
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN)))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (flags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
		       GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((flags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (flags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}
static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
	assign_bit(FLAG_PULL_UP, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
}

static long linehandle_set_config(struct linehandle_state *lh,
				  void __user *ip)
{
	struct gpiohandle_config gcnf;
	struct gpio_desc *desc;
	int i, ret;
	u32 lflags;

	if (copy_from_user(&gcnf, ip, sizeof(gcnf)))
		return -EFAULT;

	lflags = gcnf.flags;
	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	for (i = 0; i < lh->num_descs; i++) {
		desc = lh->descs[i];
		linehandle_flags_to_desc_flags(gcnf.flags, &desc->flags);

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!gcnf.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}
static long linehandle_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct linehandle_state *lh = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;
	DECLARE_BITMAP(vals, GPIOHANDLES_MAX);
	int i;

	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		/* NOTE: It's ok to read values of output lines. */
		int ret = gpiod_get_array_value_complex(false,
							true,
							lh->num_descs,
							lh->descs,
							NULL,
							vals);
		if (ret)
			return ret;

		memset(&ghd, 0, sizeof(ghd));
		for (i = 0; i < lh->num_descs; i++)
			ghd.values[i] = test_bit(i, vals);

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	} else if (cmd == GPIOHANDLE_SET_LINE_VALUES_IOCTL) {
		/*
		 * All line descriptors were created at once with the same
		 * flags so just check if the first one is really output.
		 */
		if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
			return -EPERM;

		if (copy_from_user(&ghd, ip, sizeof(ghd)))
			return -EFAULT;

		/* Clamp all values to [0,1] */
		for (i = 0; i < lh->num_descs; i++)
			__assign_bit(i, vals, ghd.values[i]);

		/* Reuse the array setting function */
		return gpiod_set_array_value_complex(false,
						     true,
						     lh->num_descs,
						     lh->descs,
						     NULL,
						     vals);
	} else if (cmd == GPIOHANDLE_SET_CONFIG_IOCTL) {
		return linehandle_set_config(lh, ip);
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long linehandle_ioctl_compat(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	return linehandle_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static void linehandle_free(struct linehandle_state *lh)
{
	int i;

	for (i = 0; i < lh->num_descs; i++)
		if (lh->descs[i])
			gpiod_free(lh->descs[i]);
	kfree(lh->label);
	put_device(&lh->gdev->dev);
	kfree(lh);
}

static int linehandle_release(struct inode *inode, struct file *file)
{
	linehandle_free(file->private_data);
	return 0;
}

static const struct file_operations linehandle_fileops = {
	.release = linehandle_release,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linehandle_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linehandle_ioctl_compat,
#endif
};
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpiohandle_request handlereq;
	struct linehandle_state *lh;
	struct file *file;
	int fd, i, ret;
	u32 lflags;

	if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
		return -EFAULT;
	if ((handlereq.lines == 0) || (handlereq.lines > GPIOHANDLES_MAX))
		return -EINVAL;

	lflags = handlereq.flags;

	ret = linehandle_validate_flags(lflags);
	if (ret)
		return ret;

	lh = kzalloc(sizeof(*lh), GFP_KERNEL);
	if (!lh)
		return -ENOMEM;
	lh->gdev = gdev;
	get_device(&gdev->dev);

	if (handlereq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		lh->label = kstrndup(handlereq.consumer_label,
				     sizeof(handlereq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!lh->label) {
			ret = -ENOMEM;
			goto out_free_lh;
		}
	}

	lh->num_descs = handlereq.lines;

	/* Request each GPIO */
	for (i = 0; i < handlereq.lines; i++) {
		u32 offset = handlereq.lineoffsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_lh;
		}

		ret = gpiod_request(desc, lh->label);
		if (ret)
			goto out_free_lh;
		lh->descs[i] = desc;
		linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_lh;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
			int val = !!handlereq.default_values[i];

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_lh;
		} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_lh;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_lh;
	}

	file = anon_inode_getfile("gpio-linehandle",
				  &linehandle_fileops,
				  lh,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	handlereq.fd = fd;
	if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lh->num_descs);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_lh:
	linehandle_free(lh);
	return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */

/**
 * struct line - contains the state of a requested line
 * @desc: the GPIO descriptor for this line.
 * @req: the corresponding line request
 * @irq: the interrupt triggered in response to events on this GPIO
 * @eflags: the edge flags, GPIO_V2_LINE_FLAG_EDGE_RISING and/or
 * GPIO_V2_LINE_FLAG_EDGE_FALLING, indicating the edge detection applied
 * @timestamp_ns: cache for the timestamp storing it between hardirq and
 * IRQ thread, used to bring the timestamp close to the actual event
 * @req_seqno: the seqno for the current edge event in the sequence of
 * events for the corresponding line request. This is drawn from the @req.
 * @line_seqno: the seqno for the current edge event in the sequence of
 * events for this line.
 * @work: the worker that implements software debouncing
 * @sw_debounced: flag indicating if the software debouncer is active
 * @level: the current debounced physical level of the line
 */
struct line {
	struct gpio_desc *desc;
	/*
	 * -- edge detector specific fields --
	 */
	struct linereq *req;
	unsigned int irq;
	/*
	 * eflags is set by edge_detector_setup(), edge_detector_stop() and
	 * edge_detector_update(), which are themselves mutually exclusive,
	 * and is accessed by edge_irq_thread() and debounce_work_func(),
	 * which can both live with a slightly stale value.
	 */
	u64 eflags;
	/*
	 * timestamp_ns and req_seqno are accessed only by
	 * edge_irq_handler() and edge_irq_thread(), which are themselves
	 * mutually exclusive, so no additional protection is necessary.
	 */
	u64 timestamp_ns;
	u32 req_seqno;
	/*
	 * line_seqno is accessed by either edge_irq_thread() or
	 * debounce_work_func(), which are themselves mutually exclusive,
	 * so no additional protection is necessary.
	 */
	u32 line_seqno;
	/*
	 * -- debouncer specific fields --
	 */
	struct delayed_work work;
	/*
	 * sw_debounce is accessed by linereq_set_config(), which is the
	 * only setter, and linereq_get_values(), which can live with a
	 * slightly stale value.
	 */
	unsigned int sw_debounced;
	/*
	 * level is accessed by debounce_work_func(), which is the only
	 * setter, and linereq_get_values() which can live with a slightly
	 * stale value.
	 */
	unsigned int level;
};

/**
 * struct linereq - contains the state of a userspace line request
 * @gdev: the GPIO device the line request pertains to
 * @label: consumer label used to tag GPIO descriptors
 * @num_lines: the number of lines in the lines array
 * @wait: wait queue that handles blocking reads of events
 * @event_buffer_size: the number of elements allocated in @events
 * @events: KFIFO for the GPIO events
 * @seqno: the sequence number for edge events generated on all lines in
 * this line request. Note that this is not used when @num_lines is 1, as
 * the line_seqno is then the same and is cheaper to calculate.
 * @config_mutex: mutex for serializing ioctl() calls to ensure consistency
 * of configuration, particularly multi-step accesses to desc flags.
 * @lines: the lines held by this line request, with @num_lines elements.
 */
struct linereq {
	struct gpio_device *gdev;
	const char *label;
	u32 num_lines;
	wait_queue_head_t wait;
	u32 event_buffer_size;
	DECLARE_KFIFO_PTR(events, struct gpio_v2_line_event);
	atomic_t seqno;
	struct mutex config_mutex;
	struct line lines[];
};
#define GPIO_V2_LINE_BIAS_FLAGS \
	(GPIO_V2_LINE_FLAG_BIAS_PULL_UP | \
	 GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN | \
	 GPIO_V2_LINE_FLAG_BIAS_DISABLED)

#define GPIO_V2_LINE_DIRECTION_FLAGS \
	(GPIO_V2_LINE_FLAG_INPUT | \
	 GPIO_V2_LINE_FLAG_OUTPUT)

#define GPIO_V2_LINE_DRIVE_FLAGS \
	(GPIO_V2_LINE_FLAG_OPEN_DRAIN | \
	 GPIO_V2_LINE_FLAG_OPEN_SOURCE)

#define GPIO_V2_LINE_EDGE_FLAGS \
	(GPIO_V2_LINE_FLAG_EDGE_RISING | \
	 GPIO_V2_LINE_FLAG_EDGE_FALLING)

#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS

#define GPIO_V2_LINE_VALID_FLAGS \
	(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
	 GPIO_V2_LINE_DIRECTION_FLAGS | \
	 GPIO_V2_LINE_DRIVE_FLAGS | \
	 GPIO_V2_LINE_EDGE_FLAGS | \
	 GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
	 GPIO_V2_LINE_BIAS_FLAGS)

static void linereq_put_event(struct linereq *lr,
			      struct gpio_v2_line_event *le)
{
	bool overflow = false;

	spin_lock(&lr->wait.lock);
	if (kfifo_is_full(&lr->events)) {
		overflow = true;
		kfifo_skip(&lr->events);
	}
	kfifo_in(&lr->events, le, 1);
	spin_unlock(&lr->wait.lock);
	if (!overflow)
		wake_up_poll(&lr->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");
}

static u64 line_event_timestamp(struct line *line)
{
	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
		return ktime_get_real_ns();

	return ktime_get_ns();
}
static irqreturn_t edge_irq_thread(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;
	struct gpio_v2_line_event le;
	u64 eflags;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	if (line->timestamp_ns) {
		le.timestamp_ns = line->timestamp_ns;
	} else {
		/*
		 * We may be running from a nested threaded interrupt in
		 * which case we didn't get the timestamp from
		 * edge_irq_handler().
		 */
		le.timestamp_ns = line_event_timestamp(line);
		if (lr->num_lines != 1)
			line->req_seqno = atomic_inc_return(&lr->seqno);
	}
	line->timestamp_ns = 0;

	eflags = READ_ONCE(line->eflags);
	if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
		int level = gpiod_get_value_cansleep(line->desc);

		if (level)
			/* Emit low-to-high event */
			le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
	} else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
		/* Emit low-to-high event */
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
	} else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
		/* Emit high-to-low event */
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
	le.offset = gpio_chip_hwgpio(line->desc);

	linereq_put_event(lr, &le);

	return IRQ_HANDLED;
}

static irqreturn_t edge_irq_handler(int irq, void *p)
{
	struct line *line = p;
	struct linereq *lr = line->req;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	line->timestamp_ns = line_event_timestamp(line);

	if (lr->num_lines != 1)
		line->req_seqno = atomic_inc_return(&lr->seqno);

	return IRQ_WAKE_THREAD;
}
/*
 * returns the current debounced logical value.
 */
static bool debounced_value(struct line *line)
{
	bool value;

	/*
	 * minor race - debouncer may be stopped here, so edge_detector_stop()
	 * must leave the value unchanged so the following will read the level
	 * from when the debouncer was last running.
	 */
	value = READ_ONCE(line->level);

	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		value = !value;

	return value;
}

static irqreturn_t debounce_irq_handler(int irq, void *p)
{
	struct line *line = p;

	mod_delayed_work(system_wq, &line->work,
		usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));

	return IRQ_HANDLED;
}

static void debounce_work_func(struct work_struct *work)
{
	struct gpio_v2_line_event le;
	struct line *line = container_of(work, struct line, work.work);
	struct linereq *lr;
	int level;
	u64 eflags;

	level = gpiod_get_raw_value_cansleep(line->desc);
	if (level < 0) {
		pr_debug_ratelimited("debouncer failed to read line value\n");
		return;
	}

	if (READ_ONCE(line->level) == level)
		return;

	WRITE_ONCE(line->level, level);

	/* -- edge detection -- */
	eflags = READ_ONCE(line->eflags);
	if (!eflags)
		return;

	/* switch from physical level to logical - if they differ */
	if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
		level = !level;

	/* ignore edges that are not being monitored */
	if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
	    ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
		return;

	/* Do not leak kernel stack to userspace */
	memset(&le, 0, sizeof(le));

	lr = line->req;
	le.timestamp_ns = line_event_timestamp(line);
	le.offset = gpio_chip_hwgpio(line->desc);
	line->line_seqno++;
	le.line_seqno = line->line_seqno;
	le.seqno = (lr->num_lines == 1) ?
		le.line_seqno : atomic_inc_return(&lr->seqno);

	if (level)
		/* Emit low-to-high event */
		le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
	else
		/* Emit high-to-low event */
		le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;

	linereq_put_event(lr, &le);
}

static int debounce_setup(struct line *line,
			  unsigned int debounce_period_us)
{
	unsigned long irqflags;
	int ret, level, irq;

	/* try hardware */
	ret = gpiod_set_debounce(line->desc, debounce_period_us);
	if (!ret) {
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return ret;
	}
	if (ret != -ENOTSUPP)
		return ret;

	if (debounce_period_us) {
		/* setup software debounce */
		level = gpiod_get_raw_value_cansleep(line->desc);
		if (level < 0)
			return level;

		irq = gpiod_to_irq(line->desc);
		if (irq < 0)
			return -ENXIO;

		WRITE_ONCE(line->level, level);
		irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
		ret = request_irq(irq, debounce_irq_handler, irqflags,
				  line->req->label, line);
		if (ret)
			return ret;

		WRITE_ONCE(line->sw_debounced, 1);
		line->irq = irq;
	}
	return 0;
}

static bool gpio_v2_line_config_debounced(struct gpio_v2_line_config *lc,
					  unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return true;
	}
	return false;
}

static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
					       unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.debounce_period_us;
	}
	return 0;
}
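/*
 * Illustrative only (not part of the driver): a sketch, with assumed
 * values, of how userspace would attach a debounce attribute to lines 0
 * and 2 of a request so the lookups above find it:
 *
 *	lc->num_attrs = 1;
 *	lc->attrs[0].mask = BIT(0) | BIT(2);
 *	lc->attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
 *	lc->attrs[0].attr.debounce_period_us = 5000;
 *
 * Lines whose bit is not set in any DEBOUNCE attribute mask fall back to
 * the default of no debouncing (period 0).
 */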
static void edge_detector_stop(struct line *line)
{
	if (line->irq) {
		free_irq(line->irq, line);
		line->irq = 0;
	}

	cancel_delayed_work_sync(&line->work);
	WRITE_ONCE(line->sw_debounced, 0);
	WRITE_ONCE(line->eflags, 0);
	if (line->desc)
		WRITE_ONCE(line->desc->debounce_period_us, 0);
	/* do not change line->level - see comment in debounced_value() */
}

static int edge_detector_setup(struct line *line,
			       struct gpio_v2_line_config *lc,
			       unsigned int line_idx,
			       u64 eflags)
{
	u32 debounce_period_us;
	unsigned long irqflags = 0;
	int irq, ret;

	if (eflags && !kfifo_initialized(&line->req->events)) {
		ret = kfifo_alloc(&line->req->events,
				  line->req->event_buffer_size, GFP_KERNEL);
		if (ret)
			return ret;
	}
	WRITE_ONCE(line->eflags, eflags);
	if (gpio_v2_line_config_debounced(lc, line_idx)) {
		debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
		ret = debounce_setup(line, debounce_period_us);
		if (ret)
			return ret;
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
	}

	/* detection disabled or sw debouncer will provide edge detection */
	if (!eflags || READ_ONCE(line->sw_debounced))
		return 0;

	irq = gpiod_to_irq(line->desc);
	if (irq < 0)
		return -ENXIO;

	if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	/* Request a thread to read the events */
	ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
				   irqflags, line->req->label, line);
	if (ret)
		return ret;

	line->irq = irq;
	return 0;
}

static int edge_detector_update(struct line *line,
				struct gpio_v2_line_config *lc,
				unsigned int line_idx,
				u64 eflags, bool polarity_change)
{
	unsigned int debounce_period_us =
		gpio_v2_line_config_debounce_period(lc, line_idx);

	if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
	    (READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
		return 0;

	/* sw debounced and still will be...*/
	if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
		WRITE_ONCE(line->eflags, eflags);
		WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
		return 0;
	}

	/* reconfiguring edge detection or sw debounce being disabled */
	if ((line->irq && !READ_ONCE(line->sw_debounced)) ||
	    (!debounce_period_us && READ_ONCE(line->sw_debounced)))
		edge_detector_stop(line);

	return edge_detector_setup(line, lc, line_idx, eflags);
}
static u64 gpio_v2_line_config_flags(struct gpio_v2_line_config *lc,
				     unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_FLAGS) &&
		    (lc->attrs[i].mask & mask))
			return lc->attrs[i].attr.flags;
	}
	return lc->flags;
}

static int gpio_v2_line_config_output_value(struct gpio_v2_line_config *lc,
					    unsigned int line_idx)
{
	unsigned int i;
	u64 mask = BIT_ULL(line_idx);

	for (i = 0; i < lc->num_attrs; i++) {
		if ((lc->attrs[i].attr.id == GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES) &&
		    (lc->attrs[i].mask & mask))
			return !!(lc->attrs[i].attr.values & mask);
	}
	return 0;
}

static int gpio_v2_line_flags_validate(u64 flags)
{
	/* Return an error if an unknown flag is set */
	if (flags & ~GPIO_V2_LINE_VALID_FLAGS)
		return -EINVAL;

	/*
	 * Do not allow both INPUT and OUTPUT flags to be set as they are
	 * contradictory.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_INPUT) &&
	    (flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Edge detection requires explicit input. */
	if ((flags & GPIO_V2_LINE_EDGE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_INPUT))
		return -EINVAL;

	/*
	 * Do not allow OPEN_SOURCE and OPEN_DRAIN flags in a single
	 * request. If the hardware actually supports enabling both at the
	 * same time the electrical result would be disastrous.
	 */
	if ((flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN) &&
	    (flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE))
		return -EINVAL;

	/* Drive requires explicit output direction. */
	if ((flags & GPIO_V2_LINE_DRIVE_FLAGS) &&
	    !(flags & GPIO_V2_LINE_FLAG_OUTPUT))
		return -EINVAL;

	/* Bias requires explicit direction. */
	if ((flags & GPIO_V2_LINE_BIAS_FLAGS) &&
	    !(flags & GPIO_V2_LINE_DIRECTION_FLAGS))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED) &&
	     (flags & (GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN |
		       GPIO_V2_LINE_FLAG_BIAS_PULL_UP))) ||
	    ((flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN) &&
	     (flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)))
		return -EINVAL;

	return 0;
}

static int gpio_v2_line_config_validate(struct gpio_v2_line_config *lc,
					unsigned int num_lines)
{
	unsigned int i;
	u64 flags;
	int ret;

	if (lc->num_attrs > GPIO_V2_LINE_NUM_ATTRS_MAX)
		return -EINVAL;

	if (memchr_inv(lc->padding, 0, sizeof(lc->padding)))
		return -EINVAL;

	for (i = 0; i < num_lines; i++) {
		flags = gpio_v2_line_config_flags(lc, i);
		ret = gpio_v2_line_flags_validate(flags);
		if (ret)
			return ret;

		/* debounce requires explicit input */
		if (gpio_v2_line_config_debounced(lc, i) &&
		    !(flags & GPIO_V2_LINE_FLAG_INPUT))
			return -EINVAL;
	}
	return 0;
}
static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
						    unsigned long *flagsp)
{
	assign_bit(FLAG_ACTIVE_LOW, flagsp,
		   flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);

	if (flags & GPIO_V2_LINE_FLAG_OUTPUT)
		set_bit(FLAG_IS_OUT, flagsp);
	else if (flags & GPIO_V2_LINE_FLAG_INPUT)
		clear_bit(FLAG_IS_OUT, flagsp);

	assign_bit(FLAG_EDGE_RISING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_RISING);
	assign_bit(FLAG_EDGE_FALLING, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EDGE_FALLING);

	assign_bit(FLAG_OPEN_DRAIN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
	assign_bit(FLAG_OPEN_SOURCE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);

	assign_bit(FLAG_PULL_UP, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
	assign_bit(FLAG_PULL_DOWN, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
	assign_bit(FLAG_BIAS_DISABLE, flagsp,
		   flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);

	assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
		   flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
}

static long linereq_get_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_get;
	bool val;
	int ret;

	/* NOTE: It's ok to read values of output lines. */
	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	for (num_get = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			num_get++;
			descs = &lr->lines[i].desc;
		}
	}

	if (num_get == 0)
		return -EINVAL;

	if (num_get != 1) {
		descs = kmalloc_array(num_get, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv.mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_get_array_value_complex(false, true, num_get,
					    descs, NULL, vals);

	if (num_get != 1)
		kfree(descs);

	if (ret)
		return ret;

	lv.bits = 0;
	for (didx = 0, i = 0; i < lr->num_lines; i++) {
		if (lv.mask & BIT_ULL(i)) {
			if (lr->lines[i].sw_debounced)
				val = debounced_value(&lr->lines[i]);
			else
				val = test_bit(didx, vals);
			if (val)
				lv.bits |= BIT_ULL(i);
			didx++;
		}
	}

	if (copy_to_user(ip, &lv, sizeof(lv)))
		return -EFAULT;

	return 0;
}
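/*
 * Note on the mask/bits convention used above and in linereq_set_values()
 * below: bit i of lv.mask selects lines[i] of the request, and bit i of
 * lv.bits carries that line's logical value. For example (values assumed
 * for illustration), mask = 0b101 reads lines 0 and 2 of the request and
 * returns their values in bits 0 and 2 of lv.bits; other bits stay clear.
 */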
static long linereq_set_values_unlocked(struct linereq *lr,
					struct gpio_v2_line_values *lv)
{
	DECLARE_BITMAP(vals, GPIO_V2_LINES_MAX);
	struct gpio_desc **descs;
	unsigned int i, didx, num_set;
	int ret;

	bitmap_zero(vals, GPIO_V2_LINES_MAX);
	for (num_set = 0, i = 0; i < lr->num_lines; i++) {
		if (lv->mask & BIT_ULL(i)) {
			if (!test_bit(FLAG_IS_OUT, &lr->lines[i].desc->flags))
				return -EPERM;
			if (lv->bits & BIT_ULL(i))
				__set_bit(num_set, vals);
			num_set++;
			descs = &lr->lines[i].desc;
		}
	}
	if (num_set == 0)
		return -EINVAL;

	if (num_set != 1) {
		/* build compacted desc array and values */
		descs = kmalloc_array(num_set, sizeof(*descs), GFP_KERNEL);
		if (!descs)
			return -ENOMEM;
		for (didx = 0, i = 0; i < lr->num_lines; i++) {
			if (lv->mask & BIT_ULL(i)) {
				descs[didx] = lr->lines[i].desc;
				didx++;
			}
		}
	}
	ret = gpiod_set_array_value_complex(false, true, num_set,
					    descs, NULL, vals);

	if (num_set != 1)
		kfree(descs);

	return ret;
}

static long linereq_set_values(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_values lv;
	int ret;

	if (copy_from_user(&lv, ip, sizeof(lv)))
		return -EFAULT;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_values_unlocked(lr, &lv);

	mutex_unlock(&lr->config_mutex);

	return ret;
}

static long linereq_set_config_unlocked(struct linereq *lr,
					struct gpio_v2_line_config *lc)
{
	struct gpio_desc *desc;
	unsigned int i;
	u64 flags;
	bool polarity_change;
	int ret;

	for (i = 0; i < lr->num_lines; i++) {
		desc = lr->lines[i].desc;
		flags = gpio_v2_line_config_flags(lc, i);
		polarity_change =
			(!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) !=
			 ((flags & GPIO_V2_LINE_FLAG_ACTIVE_LOW) != 0));

		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);
		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			edge_detector_stop(&lr->lines[i]);
			ret = gpiod_direction_output(desc, val);
			if (ret)
				return ret;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				return ret;

			ret = edge_detector_update(&lr->lines[i], lc, i,
					flags & GPIO_V2_LINE_EDGE_FLAGS,
					polarity_change);
			if (ret)
				return ret;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_CONFIG,
					     desc);
	}
	return 0;
}

static long linereq_set_config(struct linereq *lr, void __user *ip)
{
	struct gpio_v2_line_config lc;
	int ret;

	if (copy_from_user(&lc, ip, sizeof(lc)))
		return -EFAULT;

	ret = gpio_v2_line_config_validate(&lc, lr->num_lines);
	if (ret)
		return ret;

	mutex_lock(&lr->config_mutex);

	ret = linereq_set_config_unlocked(lr, &lc);

	mutex_unlock(&lr->config_mutex);

	return ret;
}
static long linereq_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct linereq *lr = file->private_data;
	void __user *ip = (void __user *)arg;

	if (cmd == GPIO_V2_LINE_GET_VALUES_IOCTL)
		return linereq_get_values(lr, ip);
	else if (cmd == GPIO_V2_LINE_SET_VALUES_IOCTL)
		return linereq_set_values(lr, ip);
	else if (cmd == GPIO_V2_LINE_SET_CONFIG_IOCTL)
		return linereq_set_config(lr, ip);

	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long linereq_ioctl_compat(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return linereq_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static __poll_t linereq_poll(struct file *file,
			     struct poll_table_struct *wait)
{
	struct linereq *lr = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &lr->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&lr->events,
						 &lr->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

static ssize_t linereq_read(struct file *file,
			    char __user *buf,
			    size_t count,
			    loff_t *f_ps)
{
	struct linereq *lr = file->private_data;
	struct gpio_v2_line_event le;
	ssize_t bytes_read = 0;
	int ret;

	if (count < sizeof(le))
		return -EINVAL;

	do {
		spin_lock(&lr->wait.lock);
		if (kfifo_is_empty(&lr->events)) {
			if (bytes_read) {
				spin_unlock(&lr->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&lr->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(lr->wait,
					!kfifo_is_empty(&lr->events));
			if (ret) {
				spin_unlock(&lr->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&lr->events, &le, 1);
		spin_unlock(&lr->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the
			 * lock from the moment we learned the fifo is no
			 * longer empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &le, sizeof(le)))
			return -EFAULT;
		bytes_read += sizeof(le);
	} while (count >= bytes_read + sizeof(le));

	return bytes_read;
}
static void linereq_free(struct linereq *lr)
{
	unsigned int i;

	for (i = 0; i < lr->num_lines; i++) {
		edge_detector_stop(&lr->lines[i]);
		if (lr->lines[i].desc)
			gpiod_free(lr->lines[i].desc);
	}
	kfifo_free(&lr->events);
	kfree(lr->label);
	put_device(&lr->gdev->dev);
	kfree(lr);
}

static int linereq_release(struct inode *inode, struct file *file)
{
	struct linereq *lr = file->private_data;

	linereq_free(lr);
	return 0;
}

static const struct file_operations line_fileops = {
	.release = linereq_release,
	.read = linereq_read,
	.poll = linereq_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = linereq_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = linereq_ioctl_compat,
#endif
};
static int linereq_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpio_v2_line_request ulr;
	struct gpio_v2_line_config *lc;
	struct linereq *lr;
	struct file *file;
	u64 flags;
	unsigned int i;
	int fd, ret;

	if (copy_from_user(&ulr, ip, sizeof(ulr)))
		return -EFAULT;

	if ((ulr.num_lines == 0) || (ulr.num_lines > GPIO_V2_LINES_MAX))
		return -EINVAL;

	if (memchr_inv(ulr.padding, 0, sizeof(ulr.padding)))
		return -EINVAL;

	lc = &ulr.config;
	ret = gpio_v2_line_config_validate(lc, ulr.num_lines);
	if (ret)
		return ret;

	lr = kzalloc(struct_size(lr, lines, ulr.num_lines), GFP_KERNEL);
	if (!lr)
		return -ENOMEM;

	lr->gdev = gdev;
	get_device(&gdev->dev);

	for (i = 0; i < ulr.num_lines; i++) {
		lr->lines[i].req = lr;
		WRITE_ONCE(lr->lines[i].sw_debounced, 0);
		INIT_DELAYED_WORK(&lr->lines[i].work, debounce_work_func);
	}

	if (ulr.consumer[0] != '\0') {
		/* label is only initialized if consumer is set */
		lr->label = kstrndup(ulr.consumer, sizeof(ulr.consumer) - 1,
				     GFP_KERNEL);
		if (!lr->label) {
			ret = -ENOMEM;
			goto out_free_linereq;
		}
	}

	mutex_init(&lr->config_mutex);
	init_waitqueue_head(&lr->wait);
	lr->event_buffer_size = ulr.event_buffer_size;
	if (lr->event_buffer_size == 0)
		lr->event_buffer_size = ulr.num_lines * 16;
	else if (lr->event_buffer_size > GPIO_V2_LINES_MAX * 16)
		lr->event_buffer_size = GPIO_V2_LINES_MAX * 16;

	atomic_set(&lr->seqno, 0);
	lr->num_lines = ulr.num_lines;

	/* Request each GPIO */
	for (i = 0; i < ulr.num_lines; i++) {
		u32 offset = ulr.offsets[i];
		struct gpio_desc *desc = gpiochip_get_desc(gdev->chip, offset);

		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto out_free_linereq;
		}

		ret = gpiod_request(desc, lr->label);
		if (ret)
			goto out_free_linereq;

		lr->lines[i].desc = desc;
		flags = gpio_v2_line_config_flags(lc, i);
		gpio_v2_line_config_flags_to_desc_flags(flags, &desc->flags);

		ret = gpiod_set_transitory(desc, false);
		if (ret < 0)
			goto out_free_linereq;

		/*
		 * Lines have to be requested explicitly for input
		 * or output, else the line will be treated "as is".
		 */
		if (flags & GPIO_V2_LINE_FLAG_OUTPUT) {
			int val = gpio_v2_line_config_output_value(lc, i);

			ret = gpiod_direction_output(desc, val);
			if (ret)
				goto out_free_linereq;
		} else if (flags & GPIO_V2_LINE_FLAG_INPUT) {
			ret = gpiod_direction_input(desc);
			if (ret)
				goto out_free_linereq;

			ret = edge_detector_setup(&lr->lines[i], lc, i,
					flags & GPIO_V2_LINE_EDGE_FLAGS);
			if (ret)
				goto out_free_linereq;
		}

		blocking_notifier_call_chain(&desc->gdev->notifier,
					     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

		dev_dbg(&gdev->dev, "registered chardev handle for line %d\n",
			offset);
	}

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_linereq;
	}

	file = anon_inode_getfile("gpio-line", &line_fileops, lr,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	ulr.fd = fd;
	if (copy_to_user(ip, &ulr, sizeof(ulr))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
		lr->num_lines);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_linereq:
	linereq_free(lr);
	return ret;
}
#ifdef CONFIG_GPIO_CDEV_V1

/*
 * GPIO line event management
 */

/**
 * struct lineevent_state - contains the state of a userspace event
 * @gdev: the GPIO device the event pertains to
 * @label: consumer label used to tag descriptors
 * @desc: the GPIO descriptor held by this event
 * @eflags: the event flags this line was requested with
 * @irq: the interrupt that triggers in response to events on this GPIO
 * @wait: wait queue that handles blocking reads of events
 * @events: KFIFO for the GPIO events
 * @timestamp: cache for the timestamp storing it between hardirq
 * and IRQ thread, used to bring the timestamp close to the actual
 * event
 */
struct lineevent_state {
	struct gpio_device *gdev;
	const char *label;
	struct gpio_desc *desc;
	u32 eflags;
	int irq;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpioevent_data, 16);
	u64 timestamp;
};

#define GPIOEVENT_REQUEST_VALID_FLAGS \
	(GPIOEVENT_REQUEST_RISING_EDGE | \
	GPIOEVENT_REQUEST_FALLING_EDGE)

static __poll_t lineevent_poll(struct file *file,
			       struct poll_table_struct *wait)
{
	struct lineevent_state *le = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &le->wait, wait);

	if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}

struct compat_gpioeevent_data {
	compat_u64	timestamp;
	u32		id;
};
static ssize_t lineevent_read(struct file *file,
			      char __user *buf,
			      size_t count,
			      loff_t *f_ps)
{
	struct lineevent_state *le = file->private_data;
	struct gpioevent_data ge;
	ssize_t bytes_read = 0;
	ssize_t ge_size;
	int ret;

	/*
	 * When a compatible system call is being used, the struct
	 * gpioevent_data, in case of at least ia32, has a different size due
	 * to the alignment differences. Because we have first member 64 bits
	 * followed by one of 32 bits there is no gap between them. The only
	 * difference is the padding at the end of the data structure. Hence,
	 * we calculate the actual sizeof() and pass this as an argument to
	 * copy_to_user() to drop unneeded bytes from the output.
	 */
	if (compat_need_64bit_alignment_fixup())
		ge_size = sizeof(struct compat_gpioeevent_data);
	else
		ge_size = sizeof(struct gpioevent_data);
	if (count < ge_size)
		return -EINVAL;

	do {
		spin_lock(&le->wait.lock);
		if (kfifo_is_empty(&le->events)) {
			if (bytes_read) {
				spin_unlock(&le->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&le->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(le->wait,
					!kfifo_is_empty(&le->events));
			if (ret) {
				spin_unlock(&le->wait.lock);
				return ret;
			}
		}

		ret = kfifo_out(&le->events, &ge, 1);
		spin_unlock(&le->wait.lock);
		if (ret != 1) {
			/*
			 * This should never happen - we were holding the lock
			 * from the moment we learned the fifo is no longer
			 * empty until now.
			 */
			ret = -EIO;
			break;
		}

		if (copy_to_user(buf + bytes_read, &ge, ge_size))
			return -EFAULT;
		bytes_read += ge_size;
	} while (count >= bytes_read + ge_size);

	return bytes_read;
}
static void lineevent_free(struct lineevent_state *le)
{
	if (le->irq)
		free_irq(le->irq, le);
	if (le->desc)
		gpiod_free(le->desc);
	kfree(le->label);
	put_device(&le->gdev->dev);
	kfree(le);
}

static int lineevent_release(struct inode *inode, struct file *file)
{
	lineevent_free(file->private_data);
	return 0;
}

static long lineevent_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	struct lineevent_state *le = file->private_data;
	void __user *ip = (void __user *)arg;
	struct gpiohandle_data ghd;

	/*
	 * We can get the value for an event line but not set it,
	 * because it is input by definition.
	 */
	if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
		int val;

		memset(&ghd, 0, sizeof(ghd));

		val = gpiod_get_value_cansleep(le->desc);
		if (val < 0)
			return val;
		ghd.values[0] = val;

		if (copy_to_user(ip, &ghd, sizeof(ghd)))
			return -EFAULT;

		return 0;
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long lineevent_ioctl_compat(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return lineevent_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations lineevent_fileops = {
	.release = lineevent_release,
	.read = lineevent_read,
	.poll = lineevent_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = lineevent_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lineevent_ioctl_compat,
#endif
};
static irqreturn_t lineevent_irq_thread(int irq, void *p)
{
	struct lineevent_state *le = p;
	struct gpioevent_data ge;
	int ret;

	/* Do not leak kernel stack to userspace */
	memset(&ge, 0, sizeof(ge));

	/*
	 * We may be running from a nested threaded interrupt in which case
	 * we didn't get the timestamp from lineevent_irq_handler().
	 */
	if (!le->timestamp)
		ge.timestamp = ktime_get_ns();
	else
		ge.timestamp = le->timestamp;

	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		int level = gpiod_get_value_cansleep(le->desc);

		if (level)
			/* Emit low-to-high event */
			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
		else
			/* Emit high-to-low event */
			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
		/* Emit low-to-high event */
		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
		/* Emit high-to-low event */
		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
	} else {
		return IRQ_NONE;
	}

	ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge,
					    1, &le->wait.lock);
	if (ret)
		wake_up_poll(&le->wait, EPOLLIN);
	else
		pr_debug_ratelimited("event FIFO is full - event dropped\n");

	return IRQ_HANDLED;
}

static irqreturn_t lineevent_irq_handler(int irq, void *p)
{
	struct lineevent_state *le = p;

	/*
	 * Just store the timestamp in hardirq context so we get it as
	 * close in time as possible to the actual event.
	 */
	le->timestamp = ktime_get_ns();

	return IRQ_WAKE_THREAD;
}
static int lineevent_create(struct gpio_device *gdev, void __user *ip)
{
	struct gpioevent_request eventreq;
	struct lineevent_state *le;
	struct gpio_desc *desc;
	struct file *file;
	u32 offset;
	u32 lflags;
	u32 eflags;
	int fd;
	int ret;
	int irq, irqflags = 0;

	if (copy_from_user(&eventreq, ip, sizeof(eventreq)))
		return -EFAULT;

	offset = eventreq.lineoffset;
	lflags = eventreq.handleflags;
	eflags = eventreq.eventflags;

	desc = gpiochip_get_desc(gdev->chip, offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Return an error if an unknown flag is set */
	if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
	    (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS))
		return -EINVAL;

	/* This is just wrong: we don't look for events on output lines */
	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE))
		return -EINVAL;

	/* Only one bias flag can be set. */
	if (((lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE) &&
	     (lflags & (GPIOHANDLE_REQUEST_BIAS_PULL_DOWN |
			GPIOHANDLE_REQUEST_BIAS_PULL_UP))) ||
	    ((lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN) &&
	     (lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP)))
		return -EINVAL;

	le = kzalloc(sizeof(*le), GFP_KERNEL);
	if (!le)
		return -ENOMEM;
	le->gdev = gdev;
	get_device(&gdev->dev);

	if (eventreq.consumer_label[0] != '\0') {
		/* label is only initialized if consumer_label is set */
		le->label = kstrndup(eventreq.consumer_label,
				     sizeof(eventreq.consumer_label) - 1,
				     GFP_KERNEL);
		if (!le->label) {
			ret = -ENOMEM;
			goto out_free_le;
		}
	}

	ret = gpiod_request(desc, le->label);
	if (ret)
		goto out_free_le;
	le->desc = desc;
	le->eflags = eflags;

	linehandle_flags_to_desc_flags(lflags, &desc->flags);

	ret = gpiod_direction_input(desc);
	if (ret)
		goto out_free_le;

	blocking_notifier_call_chain(&desc->gdev->notifier,
				     GPIO_V2_LINE_CHANGED_REQUESTED, desc);

	irq = gpiod_to_irq(desc);
	if (irq <= 0) {
		ret = -ENODEV;
		goto out_free_le;
	}
	le->irq = irq;

	if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
	if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
		irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	irqflags |= IRQF_ONESHOT;

	INIT_KFIFO(le->events);
	init_waitqueue_head(&le->wait);

	/* Request a thread to read the events */
	ret = request_threaded_irq(le->irq,
				   lineevent_irq_handler,
				   lineevent_irq_thread,
				   irqflags,
				   le->label,
				   le);
	if (ret)
		goto out_free_le;

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto out_free_le;
	}

	file = anon_inode_getfile("gpio-event",
				  &lineevent_fileops,
				  le,
				  O_RDONLY | O_CLOEXEC);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto out_put_unused_fd;
	}

	eventreq.fd = fd;
	if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
		/*
		 * fput() will trigger the release() callback, so do not go onto
		 * the regular error cleanup path here.
		 */
		fput(file);
		put_unused_fd(fd);
		return -EFAULT;
	}

	fd_install(fd, file);

	return 0;

out_put_unused_fd:
	put_unused_fd(fd);
out_free_le:
	lineevent_free(le);
	return ret;
}
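
/*
 * Example (userspace, illustrative only): a minimal sketch of how a consumer
 * typically obtains an event handle via GPIO_GET_LINEEVENT_IOCTL and then
 * reads struct gpioevent_data records from the returned fd. The chardev path,
 * line offset and label are assumptions for the example, and error handling
 * is omitted.
 *
 *	struct gpioevent_request req = {
 *		.lineoffset = 17,
 *		.handleflags = GPIOHANDLE_REQUEST_INPUT,
 *		.eventflags = GPIOEVENT_REQUEST_BOTH_EDGES,
 *		.consumer_label = "example",
 *	};
 *	struct gpioevent_data evt;
 *	int cfd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	ioctl(cfd, GPIO_GET_LINEEVENT_IOCTL, &req);
 *	read(req.fd, &evt, sizeof(evt));
 */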

static void gpio_v2_line_info_to_v1(struct gpio_v2_line_info *info_v2,
				    struct gpioline_info *info_v1)
{
	u64 flagsv2 = info_v2->flags;

	memcpy(info_v1->name, info_v2->name, sizeof(info_v1->name));
	memcpy(info_v1->consumer, info_v2->consumer, sizeof(info_v1->consumer));
	info_v1->line_offset = info_v2->offset;
	info_v1->flags = 0;

	if (flagsv2 & GPIO_V2_LINE_FLAG_USED)
		info_v1->flags |= GPIOLINE_FLAG_KERNEL;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OUTPUT)
		info_v1->flags |= GPIOLINE_FLAG_IS_OUT;

	if (flagsv2 & GPIO_V2_LINE_FLAG_ACTIVE_LOW)
		info_v1->flags |= GPIOLINE_FLAG_ACTIVE_LOW;

	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_DRAIN)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_DRAIN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_OPEN_SOURCE)
		info_v1->flags |= GPIOLINE_FLAG_OPEN_SOURCE;

	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_UP)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_UP;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN;
	if (flagsv2 & GPIO_V2_LINE_FLAG_BIAS_DISABLED)
		info_v1->flags |= GPIOLINE_FLAG_BIAS_DISABLE;
}

static void gpio_v2_line_info_changed_to_v1(
		struct gpio_v2_line_info_changed *lic_v2,
		struct gpioline_info_changed *lic_v1)
{
	gpio_v2_line_info_to_v1(&lic_v2->info, &lic_v1->info);
	lic_v1->timestamp = lic_v2->timestamp_ns;
	lic_v1->event_type = lic_v2->event_type;
}

#endif /* CONFIG_GPIO_CDEV_V1 */

static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
				  struct gpio_v2_line_info *info)
{
	struct gpio_chip *gc = desc->gdev->chip;
	bool ok_for_pinctrl;
	unsigned long flags;
	u32 debounce_period_us;
	unsigned int num_attrs = 0;

	memset(info, 0, sizeof(*info));
	info->offset = gpio_chip_hwgpio(desc);

	/*
	 * This function takes a mutex so we must check this before taking
	 * the spinlock.
	 *
	 * FIXME: find a non-racy way to retrieve this information. Maybe a
	 * lock common to both frameworks?
	 */
	ok_for_pinctrl =
		pinctrl_gpio_can_use_line(gc->base + info->offset);

	spin_lock_irqsave(&gpio_lock, flags);

	if (desc->name)
		strscpy(info->name, desc->name, sizeof(info->name));

	if (desc->label)
		strscpy(info->consumer, desc->label, sizeof(info->consumer));

	/*
	 * Userspace only needs to know that the kernel is using this GPIO so
	 * it can't use it.
	 */
	info->flags = 0;
	if (test_bit(FLAG_REQUESTED, &desc->flags) ||
	    test_bit(FLAG_IS_HOGGED, &desc->flags) ||
	    test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
	    test_bit(FLAG_EXPORT, &desc->flags) ||
	    test_bit(FLAG_SYSFS, &desc->flags) ||
	    !gpiochip_line_is_valid(gc, info->offset) ||
	    !ok_for_pinctrl)
		info->flags |= GPIO_V2_LINE_FLAG_USED;

	if (test_bit(FLAG_IS_OUT, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
	else
		info->flags |= GPIO_V2_LINE_FLAG_INPUT;

	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;

	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
	if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;

	if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
	if (test_bit(FLAG_PULL_DOWN, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
	if (test_bit(FLAG_PULL_UP, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;

	if (test_bit(FLAG_EDGE_RISING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
	if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;

	if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
		info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;

	debounce_period_us = READ_ONCE(desc->debounce_period_us);
	if (debounce_period_us) {
		info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
		info->attrs[num_attrs].debounce_period_us = debounce_period_us;
		num_attrs++;
	}
	info->num_attrs = num_attrs;

	spin_unlock_irqrestore(&gpio_lock, flags);
}
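
/**
 * struct gpio_chardev_data - the internal state of an open chardev file
 * @gdev: the GPIO device the chardev was opened against
 * @wait: wait queue that handles blocking reads of line info changed events
 * @events: KFIFO queuing the line info changed events for this file
 * @lineinfo_changed_nb: notifier block registered on the gdev notifier chain
 * @watched_lines: bitmap of line offsets being watched for info changes
 * @watch_abi_version: the uAPI version (1 or 2) the watch was requested
 *	with; only present when CONFIG_GPIO_CDEV_V1 is enabled
 */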
struct gpio_chardev_data {
	struct gpio_device *gdev;
	wait_queue_head_t wait;
	DECLARE_KFIFO(events, struct gpio_v2_line_info_changed, 32);
	struct notifier_block lineinfo_changed_nb;
	unsigned long *watched_lines;
#ifdef CONFIG_GPIO_CDEV_V1
	atomic_t watch_abi_version;
#endif
};
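
/*
 * chipinfo_get() - service GPIO_GET_CHIPINFO_IOCTL by filling a
 * struct gpiochip_info from @cdev->gdev and copying it to userspace at @ip.
 */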
static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
	struct gpio_device *gdev = cdev->gdev;
	struct gpiochip_info chipinfo;

	memset(&chipinfo, 0, sizeof(chipinfo));

	strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
	strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
	chipinfo.lines = gdev->ngpio;

	if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_GPIO_CDEV_V1
/*
 * returns 0 if the versions match, else the previously selected ABI version
 */
static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
				       unsigned int version)
{
	int abiv = atomic_cmpxchg(&cdata->watch_abi_version, 0, version);

	if (abiv == version)
		return 0;

	return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
			   bool watch)
{
	struct gpio_desc *desc;
	struct gpioline_info lineinfo;
	struct gpio_v2_line_info lineinfo_v2;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	/* this doubles as a range check on line_offset */
	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
		if (lineinfo_ensure_abi_version(cdev, 1))
			return -EPERM;

		if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
			return -EBUSY;
	}

	gpio_desc_to_lineinfo(desc, &lineinfo_v2);
	gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.line_offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
			bool watch)
{
	struct gpio_desc *desc;
	struct gpio_v2_line_info lineinfo;

	if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
		return -EFAULT;

	if (memchr_inv(lineinfo.padding, 0, sizeof(lineinfo.padding)))
		return -EINVAL;

	desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.offset);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (watch) {
#ifdef CONFIG_GPIO_CDEV_V1
		if (lineinfo_ensure_abi_version(cdev, 2))
			return -EPERM;
#endif
		if (test_and_set_bit(lineinfo.offset, cdev->watched_lines))
			return -EBUSY;
	}
	gpio_desc_to_lineinfo(desc, &lineinfo);

	if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
		if (watch)
			clear_bit(lineinfo.offset, cdev->watched_lines);
		return -EFAULT;
	}

	return 0;
}
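
/*
 * Example (userspace, illustrative only): a minimal sketch of querying a
 * single line with GPIO_V2_GET_LINEINFO_IOCTL. The struct must be zeroed so
 * the padding check above passes. The chardev path and line offset are
 * assumptions for the example, and error handling is omitted.
 *
 *	struct gpio_v2_line_info info;
 *	int cfd = open("/dev/gpiochip0", O_RDONLY);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.offset = 4;
 *	ioctl(cfd, GPIO_V2_GET_LINEINFO_IOCTL, &info);
 *	printf("line %u: %s flags 0x%llx\n", info.offset, info.name,
 *	       (unsigned long long)info.flags);
 */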

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
	__u32 offset;

	if (copy_from_user(&offset, ip, sizeof(offset)))
		return -EFAULT;

	if (offset >= cdev->gdev->ngpio)
		return -EINVAL;

	if (!test_and_clear_bit(offset, cdev->watched_lines))
		return -EBUSY;

	return 0;
}

/*
 * gpio_ioctl() - ioctl handler for the GPIO chardev
 */
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	/* We fail any subsequent ioctl():s when the chip is gone */
	if (!gdev->chip)
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
		return linehandle_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
		return lineevent_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
		   cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
		return lineinfo_get_v1(cdev, ip,
				       cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
	} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
		   cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
		return lineinfo_get(cdev, ip,
				    cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL);
	} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
		return linereq_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
		return lineinfo_unwatch(cdev, ip);
	}
	return -EINVAL;
}

#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	return gpio_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static struct gpio_chardev_data *
to_gpio_chardev_data(struct notifier_block *nb)
{
	return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
}
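
/*
 * lineinfo_changed_notify() - callback on the gdev->notifier chain. If the
 * changed line is being watched by this chardev, translate the descriptor
 * into a struct gpio_v2_line_info_changed event, queue it on the file's
 * KFIFO and wake up any poll()ers.
 */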
static int lineinfo_changed_notify(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct gpio_chardev_data *cdev = to_gpio_chardev_data(nb);
	struct gpio_v2_line_info_changed chg;
	struct gpio_desc *desc = data;
	int ret;

	if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
		return NOTIFY_DONE;

	memset(&chg, 0, sizeof(chg));
	chg.event_type = action;
	chg.timestamp_ns = ktime_get_ns();
	gpio_desc_to_lineinfo(desc, &chg.info);

	ret = kfifo_in_spinlocked(&cdev->events, &chg, 1, &cdev->wait.lock);
	if (ret)
		wake_up_poll(&cdev->wait, EPOLLIN);
	else
		pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n");

	return NOTIFY_OK;
}

static __poll_t lineinfo_watch_poll(struct file *file,
				    struct poll_table_struct *pollt)
{
	struct gpio_chardev_data *cdev = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &cdev->wait, pollt);

	if (!kfifo_is_empty_spinlocked_noirqsave(&cdev->events,
						 &cdev->wait.lock))
		events = EPOLLIN | EPOLLRDNORM;

	return events;
}
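
/*
 * lineinfo_watch_read() - read queued line info changed events. When
 * CONFIG_GPIO_CDEV_V1 is enabled, the record size written to userspace
 * depends on the ABI version the watch was requested with (v1
 * struct gpioline_info_changed or v2 struct gpio_v2_line_info_changed);
 * otherwise only the v2 layout is produced.
 */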
static ssize_t lineinfo_watch_read(struct file *file, char __user *buf,
				   size_t count, loff_t *off)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_v2_line_info_changed event;
	ssize_t bytes_read = 0;
	int ret;
	size_t event_size;

#ifndef CONFIG_GPIO_CDEV_V1
	event_size = sizeof(struct gpio_v2_line_info_changed);
	if (count < event_size)
		return -EINVAL;
#endif

	do {
		spin_lock(&cdev->wait.lock);
		if (kfifo_is_empty(&cdev->events)) {
			if (bytes_read) {
				spin_unlock(&cdev->wait.lock);
				return bytes_read;
			}

			if (file->f_flags & O_NONBLOCK) {
				spin_unlock(&cdev->wait.lock);
				return -EAGAIN;
			}

			ret = wait_event_interruptible_locked(cdev->wait,
					!kfifo_is_empty(&cdev->events));
			if (ret) {
				spin_unlock(&cdev->wait.lock);
				return ret;
			}
		}
#ifdef CONFIG_GPIO_CDEV_V1
		/* must be after kfifo check so watch_abi_version is set */
		if (atomic_read(&cdev->watch_abi_version) == 2)
			event_size = sizeof(struct gpio_v2_line_info_changed);
		else
			event_size = sizeof(struct gpioline_info_changed);
		if (count < event_size) {
			spin_unlock(&cdev->wait.lock);
			return -EINVAL;
		}
#endif
		ret = kfifo_out(&cdev->events, &event, 1);
		spin_unlock(&cdev->wait.lock);
		if (ret != 1) {
			ret = -EIO;
			break;
			/* We should never get here. See lineevent_read(). */
		}

#ifdef CONFIG_GPIO_CDEV_V1
		if (event_size == sizeof(struct gpio_v2_line_info_changed)) {
			if (copy_to_user(buf + bytes_read, &event, event_size))
				return -EFAULT;
		} else {
			struct gpioline_info_changed event_v1;

			gpio_v2_line_info_changed_to_v1(&event, &event_v1);
			if (copy_to_user(buf + bytes_read, &event_v1,
					 event_size))
				return -EFAULT;
		}
#else
		if (copy_to_user(buf + bytes_read, &event, event_size))
			return -EFAULT;
#endif
		bytes_read += event_size;
	} while (count >= bytes_read + sizeof(event));

	return bytes_read;
}

/**
 * gpio_chrdev_open() - open the chardev for ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns 0 on success
 */
static int gpio_chrdev_open(struct inode *inode, struct file *file)
{
	struct gpio_device *gdev = container_of(inode->i_cdev,
						struct gpio_device, chrdev);
	struct gpio_chardev_data *cdev;
	int ret = -ENOMEM;

	/* Fail on open if the backing gpiochip is gone */
	if (!gdev->chip)
		return -ENODEV;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL);
	if (!cdev->watched_lines)
		goto out_free_cdev;

	init_waitqueue_head(&cdev->wait);
	INIT_KFIFO(cdev->events);
	cdev->gdev = gdev;

	cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
	ret = blocking_notifier_chain_register(&gdev->notifier,
					       &cdev->lineinfo_changed_nb);
	if (ret)
		goto out_free_bitmap;

	get_device(&gdev->dev);
	file->private_data = cdev;

	ret = nonseekable_open(inode, file);
	if (ret)
		goto out_unregister_notifier;

	return ret;

out_unregister_notifier:
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
out_free_bitmap:
	bitmap_free(cdev->watched_lines);
out_free_cdev:
	kfree(cdev);
	return ret;
}

/**
 * gpio_chrdev_release() - close chardev after ioctl operations
 * @inode: inode for this chardev
 * @file: file struct for storing private data
 *
 * Returns 0 on success
 */
static int gpio_chrdev_release(struct inode *inode, struct file *file)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;

	bitmap_free(cdev->watched_lines);
	blocking_notifier_chain_unregister(&gdev->notifier,
					   &cdev->lineinfo_changed_nb);
	put_device(&gdev->dev);
	kfree(cdev);

	return 0;
}

static const struct file_operations gpio_fileops = {
	.release = gpio_chrdev_release,
	.open = gpio_chrdev_open,
	.poll = lineinfo_watch_poll,
	.read = lineinfo_watch_read,
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.unlocked_ioctl = gpio_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = gpio_ioctl_compat,
#endif
};
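
/**
 * gpiolib_cdev_register() - add the chardev for a GPIO device
 * @gdev: the GPIO device to add the chardev for
 * @devt: the dev_t whose major number the chardev is created under; the
 *	minor number is taken from @gdev->id
 *
 * Returns 0 on success, or a negative errno on failure.
 */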
int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
{
	int ret;

	cdev_init(&gdev->chrdev, &gpio_fileops);
	gdev->chrdev.owner = THIS_MODULE;
	gdev->dev.devt = MKDEV(MAJOR(devt), gdev->id);

	ret = cdev_device_add(&gdev->chrdev, &gdev->dev);
	if (ret)
		return ret;

	chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n",
		 MAJOR(devt), gdev->id);

	return 0;
}

void gpiolib_cdev_unregister(struct gpio_device *gdev)
{
	cdev_device_del(&gdev->chrdev, &gdev->dev);
}