2019-04-25 20:06:18 +02:00
// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
2011-10-28 23:50:49 +02:00
2012-01-22 11:23:42 -05:00
# include <linux/device.h>
2014-05-19 15:13:45 +08:00
# include <linux/export.h>
2011-10-28 23:50:49 +02:00
# include <linux/interrupt.h>
2014-05-19 15:13:45 +08:00
# include <linux/irq.h>
2012-05-13 10:59:56 +01:00
# include <linux/irqdomain.h>
2012-07-24 15:41:19 +01:00
# include <linux/pm_runtime.h>
2014-05-19 15:13:45 +08:00
# include <linux/regmap.h>
2011-10-28 23:50:49 +02:00
# include <linux/slab.h>
# include "internal.h"
/*
 * Runtime state for one regmap-based interrupt controller instance,
 * allocated by regmap_add_irq_chip_np() and freed by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serializes staged updates vs. sync */
	struct irq_chip irq_chip;	/* per-instance copy so .name can differ */

	struct regmap *map;
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* first allocated Linux IRQ, if legacy */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* net wake refcount change to propagate */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *main_status_buf;	/* cached main (top-level) status */
	unsigned int *status_buf;	/* decoded per-register status */
	unsigned int *mask_buf;		/* staged mask bits, one word per reg */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;		/* staged wake-enable bits */
	unsigned int *type_buf;		/* staged trigger-type bits */
	unsigned int *type_buf_def;	/* default/implemented type bits */

	unsigned int irq_reg_stride;	/* register step between IRQ banks */
	unsigned int type_reg_stride;	/* register step between type banks */

	bool clear_status:1;		/* read-to-clear status on next sync */
};
static inline const
struct regmap_irq * irq_to_regmap_irq ( struct regmap_irq_chip_data * data ,
int irq )
{
2012-05-13 10:59:56 +01:00
return & data - > chip - > irqs [ irq ] ;
2011-10-28 23:50:49 +02:00
}
/* irq_bus_lock callback: hold the mutex across a batch of IRQ updates. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}
2017-06-23 14:35:09 +02:00
static int regmap_irq_update_bits ( struct regmap_irq_chip_data * d ,
unsigned int reg , unsigned int mask ,
unsigned int val )
{
if ( d - > chip - > mask_writeonly )
return regmap_write_bits ( d - > map , reg , mask , val ) ;
else
return regmap_update_bits ( d - > map , reg , mask , val ) ;
}
2011-10-28 23:50:49 +02:00
/*
 * irq_bus_sync_unlock callback: push all staged mask/wake/type changes
 * to the hardware, then drop the mutex taken in regmap_irq_lock().
 * Also propagates any net change in the wake reference count to the
 * parent interrupt.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * For clear_on_unmask chips a read of each status register clears
	 * any stale pending state before the unmask takes effect.
	 */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted polarity: a set bit means "enabled". */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							d->mask_buf_def[i],
							~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							d->mask_buf_def[i],
							d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			/* Skip registers with no configurable type bits. */
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}
/*
 * irq_enable callback: stage the unmask in mask_buf.  The hardware write
 * is deferred to regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	/* Request a status read-to-clear on the next sync_unlock. */
	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}
static void regmap_irq_disable ( struct irq_data * data )
{
struct regmap_irq_chip_data * d = irq_data_get_irq_chip_data ( data ) ;
2012-04-10 23:37:22 -06:00
struct regmap * map = d - > map ;
2012-05-13 10:59:56 +01:00
const struct regmap_irq * irq_data = irq_to_regmap_irq ( d , data - > hwirq ) ;
2011-10-28 23:50:49 +02:00
2012-04-09 13:40:24 -06:00
d - > mask_buf [ irq_data - > reg_offset / map - > reg_stride ] | = irq_data - > mask ;
2011-10-28 23:50:49 +02:00
}
2015-12-22 18:25:26 +05:30
/*
 * irq_set_type callback: stage the trigger-type bits for this IRQ in
 * type_buf; the hardware write happens in regmap_irq_sync_unlock()
 * (or via the mask path when type_in_mask is set).
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	/* Types the chip can't configure are accepted without staging. */
	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	/* Clear this IRQ's previous type bits before setting new ones. */
	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
2012-06-05 14:34:03 +01:00
static int regmap_irq_set_wake ( struct irq_data * data , unsigned int on )
{
struct regmap_irq_chip_data * d = irq_data_get_irq_chip_data ( data ) ;
struct regmap * map = d - > map ;
const struct regmap_irq * irq_data = irq_to_regmap_irq ( d , data - > hwirq ) ;
if ( on ) {
2012-12-19 19:42:28 +05:30
if ( d - > wake_buf )
d - > wake_buf [ irq_data - > reg_offset / map - > reg_stride ]
& = ~ irq_data - > mask ;
2012-06-05 14:34:03 +01:00
d - > wake_count + + ;
} else {
2012-12-19 19:42:28 +05:30
if ( d - > wake_buf )
d - > wake_buf [ irq_data - > reg_offset / map - > reg_stride ]
| = irq_data - > mask ;
2012-06-05 14:34:03 +01:00
d - > wake_count - - ;
}
return 0 ;
}
2012-08-01 11:40:47 -06:00
/*
 * Template irq_chip; copied into each instance's irq_chip field so that
 * the name can be set per chip.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};
2019-01-22 11:42:24 +02:00
/*
 * Read the status register(s) corresponding to main-status bit @b into
 * data->status_buf.  Without a sub_reg_offsets table a linear
 * bit-to-register mapping is assumed.  Returns 0 or the first regmap
 * read error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	int i, ret = 0;

	if (!chip->sub_reg_offsets) {
		/* Assume linear mapping */
		ret = regmap_read(map, chip->status_base +
				  (b * map->reg_stride * data->irq_reg_stride),
				   &data->status_buf[b]);
	} else {
		subreg = &chip->sub_reg_offsets[b];
		for (i = 0; i < subreg->num_regs; i++) {
			unsigned int offset = subreg->offset[i];

			ret = regmap_read(map, chip->status_base + offset,
					  &data->status_buf[offset]);
			if (ret)
				break;
		}
	}
	return ret;
}
2011-10-28 23:50:49 +02:00
/*
 * Primary threaded handler: read all (or only the active) status
 * registers, ack pending sources early, then dispatch each unmasked
 * pending bit as a nested virtual interrupt.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			/* exit path still does pm_runtime_put() to balance */
			goto exit;
		}
	}

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */
	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				/* Bits past the advertised count are ignored */
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

		/* One raw buffer, viewed at the map's value width. */
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Fallback: one read per status register. */
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2012-05-13 10:59:56 +01:00
/* irq_domain .map callback: wire a new virq to this chip's handlers. */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}
2015-04-27 21:52:10 +09:00
/* Domain ops; onetwocell xlate accepts one- or two-cell DT specifiers. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};
2011-10-28 23:50:49 +02:00
/**
2020-04-02 22:36:44 +02:00
* regmap_add_irq_chip_np ( ) - Use standard regmap IRQ controller handling
2011-10-28 23:50:49 +02:00
*
2020-04-02 22:36:44 +02:00
* @ np : The device_node where the IRQ domain should be added to .
2017-01-12 11:17:39 +00:00
* @ map : The regmap for the device .
* @ irq : The IRQ the device uses to signal interrupts .
* @ irq_flags : The IRQF_ flags to use for the primary interrupt .
* @ irq_base : Allocate at specific IRQ number if irq_base > 0.
* @ chip : Configuration for the interrupt controller .
* @ data : Runtime data structure for the controller , allocated on success .
2011-10-28 23:50:49 +02:00
*
* Returns 0 on success or an errno on failure .
*
* In order for this to be efficient the chip really should use a
* register cache . The chip driver is responsible for restoring the
* register values used by the IRQ controller over suspend and resume .
*/
2020-04-02 22:36:44 +02:00
int regmap_add_irq_chip_np ( struct device_node * np , struct regmap * map , int irq ,
int irq_flags , int irq_base ,
const struct regmap_irq_chip * chip ,
struct regmap_irq_chip_data * * data )
2011-10-28 23:50:49 +02:00
{
struct regmap_irq_chip_data * d ;
2012-05-13 10:59:56 +01:00
int i ;
2011-10-28 23:50:49 +02:00
int ret = - ENOMEM ;
2018-12-07 14:04:52 +01:00
int num_type_reg ;
2012-07-27 13:01:54 -06:00
u32 reg ;
2015-09-17 05:23:20 +00:00
u32 unmask_offset ;
2011-10-28 23:50:49 +02:00
2014-05-19 15:13:45 +08:00
if ( chip - > num_regs < = 0 )
return - EINVAL ;
2018-12-19 12:18:05 +01:00
if ( chip - > clear_on_unmask & & ( chip - > ack_base | | chip - > use_ack ) )
return - EINVAL ;
2012-04-09 13:40:24 -06:00
for ( i = 0 ; i < chip - > num_irqs ; i + + ) {
if ( chip - > irqs [ i ] . reg_offset % map - > reg_stride )
return - EINVAL ;
if ( chip - > irqs [ i ] . reg_offset / map - > reg_stride > =
chip - > num_regs )
return - EINVAL ;
}
2012-05-13 10:59:56 +01:00
if ( irq_base ) {
irq_base = irq_alloc_descs ( irq_base , 0 , chip - > num_irqs , 0 ) ;
if ( irq_base < 0 ) {
dev_warn ( map - > dev , " Failed to allocate IRQs: %d \n " ,
irq_base ) ;
return irq_base ;
}
2011-10-28 23:50:49 +02:00
}
d = kzalloc ( sizeof ( * d ) , GFP_KERNEL ) ;
if ( ! d )
return - ENOMEM ;
2019-01-22 11:42:24 +02:00
if ( chip - > num_main_regs ) {
d - > main_status_buf = kcalloc ( chip - > num_main_regs ,
sizeof ( unsigned int ) ,
GFP_KERNEL ) ;
if ( ! d - > main_status_buf )
goto err_alloc ;
}
2015-11-20 18:06:29 +08:00
d - > status_buf = kcalloc ( chip - > num_regs , sizeof ( unsigned int ) ,
2011-10-28 23:50:49 +02:00
GFP_KERNEL ) ;
if ( ! d - > status_buf )
goto err_alloc ;
2015-11-20 18:06:29 +08:00
d - > mask_buf = kcalloc ( chip - > num_regs , sizeof ( unsigned int ) ,
2011-10-28 23:50:49 +02:00
GFP_KERNEL ) ;
if ( ! d - > mask_buf )
goto err_alloc ;
2015-11-20 18:06:29 +08:00
d - > mask_buf_def = kcalloc ( chip - > num_regs , sizeof ( unsigned int ) ,
2011-10-28 23:50:49 +02:00
GFP_KERNEL ) ;
if ( ! d - > mask_buf_def )
goto err_alloc ;
2012-06-05 14:34:03 +01:00
if ( chip - > wake_base ) {
2015-11-20 18:06:29 +08:00
d - > wake_buf = kcalloc ( chip - > num_regs , sizeof ( unsigned int ) ,
2012-06-05 14:34:03 +01:00
GFP_KERNEL ) ;
if ( ! d - > wake_buf )
goto err_alloc ;
}
2018-12-07 14:04:52 +01:00
num_type_reg = chip - > type_in_mask ? chip - > num_regs : chip - > num_type_reg ;
if ( num_type_reg ) {
d - > type_buf_def = kcalloc ( num_type_reg ,
sizeof ( unsigned int ) , GFP_KERNEL ) ;
2015-12-22 18:25:26 +05:30
if ( ! d - > type_buf_def )
goto err_alloc ;
2018-12-07 14:04:52 +01:00
d - > type_buf = kcalloc ( num_type_reg , sizeof ( unsigned int ) ,
2015-12-22 18:25:26 +05:30
GFP_KERNEL ) ;
if ( ! d - > type_buf )
goto err_alloc ;
}
2012-08-01 11:40:47 -06:00
d - > irq_chip = regmap_irq_chip ;
2012-08-01 11:40:48 -06:00
d - > irq_chip . name = chip - > name ;
2012-06-05 14:34:03 +01:00
d - > irq = irq ;
2011-10-28 23:50:49 +02:00
d - > map = map ;
d - > chip = chip ;
d - > irq_base = irq_base ;
2012-05-14 22:40:43 +09:00
if ( chip - > irq_reg_stride )
d - > irq_reg_stride = chip - > irq_reg_stride ;
else
d - > irq_reg_stride = 1 ;
2015-12-22 18:25:26 +05:30
if ( chip - > type_reg_stride )
d - > type_reg_stride = chip - > type_reg_stride ;
else
d - > type_reg_stride = 1 ;
2015-08-21 10:26:42 +02:00
if ( ! map - > use_single_read & & map - > reg_stride = = 1 & &
2013-01-03 14:27:15 +00:00
d - > irq_reg_stride = = 1 ) {
2015-11-20 18:06:30 +08:00
d - > status_reg_buf = kmalloc_array ( chip - > num_regs ,
map - > format . val_bytes ,
GFP_KERNEL ) ;
2013-01-03 14:27:15 +00:00
if ( ! d - > status_reg_buf )
goto err_alloc ;
}
2011-10-28 23:50:49 +02:00
mutex_init ( & d - > lock ) ;
for ( i = 0 ; i < chip - > num_irqs ; i + + )
2012-04-09 13:40:24 -06:00
d - > mask_buf_def [ chip - > irqs [ i ] . reg_offset / map - > reg_stride ]
2011-10-28 23:50:49 +02:00
| = chip - > irqs [ i ] . mask ;
/* Mask all the interrupts by default */
for ( i = 0 ; i < chip - > num_regs ; i + + ) {
d - > mask_buf [ i ] = d - > mask_buf_def [ i ] ;
2019-01-14 17:32:58 +08:00
if ( ! chip - > mask_base )
continue ;
2012-07-27 13:01:54 -06:00
reg = chip - > mask_base +
( i * map - > reg_stride * d - > irq_reg_stride ) ;
2012-08-30 17:03:35 +08:00
if ( chip - > mask_invert )
2017-06-23 14:35:09 +02:00
ret = regmap_irq_update_bits ( d , reg ,
2012-08-30 17:03:35 +08:00
d - > mask_buf [ i ] , ~ d - > mask_buf [ i ] ) ;
2015-09-17 05:23:20 +00:00
else if ( d - > chip - > unmask_base ) {
unmask_offset = d - > chip - > unmask_base -
d - > chip - > mask_base ;
2017-06-23 14:35:09 +02:00
ret = regmap_irq_update_bits ( d ,
2015-09-17 05:23:20 +00:00
reg + unmask_offset ,
d - > mask_buf [ i ] ,
d - > mask_buf [ i ] ) ;
} else
2017-06-23 14:35:09 +02:00
ret = regmap_irq_update_bits ( d , reg ,
2012-08-01 20:29:14 +01:00
d - > mask_buf [ i ] , d - > mask_buf [ i ] ) ;
2011-10-28 23:50:49 +02:00
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to set masks in 0x%x: %d \n " ,
2012-07-27 13:01:54 -06:00
reg , ret ) ;
2011-10-28 23:50:49 +02:00
goto err_alloc ;
}
2013-07-22 17:15:52 +02:00
if ( ! chip - > init_ack_masked )
continue ;
/* Ack masked but set interrupts */
reg = chip - > status_base +
( i * map - > reg_stride * d - > irq_reg_stride ) ;
ret = regmap_read ( map , reg , & d - > status_buf [ i ] ) ;
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to read IRQ status: %d \n " ,
ret ) ;
goto err_alloc ;
}
2013-12-15 13:36:51 +04:00
if ( d - > status_buf [ i ] & & ( chip - > ack_base | | chip - > use_ack ) ) {
2013-07-22 17:15:52 +02:00
reg = chip - > ack_base +
( i * map - > reg_stride * d - > irq_reg_stride ) ;
2015-09-17 05:23:21 +00:00
if ( chip - > ack_invert )
ret = regmap_write ( map , reg ,
~ ( d - > status_buf [ i ] & d - > mask_buf [ i ] ) ) ;
else
ret = regmap_write ( map , reg ,
2013-07-22 17:15:52 +02:00
d - > status_buf [ i ] & d - > mask_buf [ i ] ) ;
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to ack 0x%x: %d \n " ,
reg , ret ) ;
goto err_alloc ;
}
}
2011-10-28 23:50:49 +02:00
}
2012-08-01 13:57:24 -06:00
/* Wake is disabled by default */
if ( d - > wake_buf ) {
for ( i = 0 ; i < chip - > num_regs ; i + + ) {
d - > wake_buf [ i ] = d - > mask_buf_def [ i ] ;
reg = chip - > wake_base +
( i * map - > reg_stride * d - > irq_reg_stride ) ;
2013-01-04 16:35:07 +00:00
if ( chip - > wake_invert )
2017-06-23 14:35:09 +02:00
ret = regmap_irq_update_bits ( d , reg ,
2013-01-04 16:35:07 +00:00
d - > mask_buf_def [ i ] ,
0 ) ;
else
2017-06-23 14:35:09 +02:00
ret = regmap_irq_update_bits ( d , reg ,
2013-01-04 16:35:07 +00:00
d - > mask_buf_def [ i ] ,
d - > wake_buf [ i ] ) ;
2012-08-01 13:57:24 -06:00
if ( ret ! = 0 ) {
dev_err ( map - > dev , " Failed to set masks in 0x%x: %d \n " ,
reg , ret ) ;
goto err_alloc ;
}
}
}
2018-12-07 14:04:52 +01:00
if ( chip - > num_type_reg & & ! chip - > type_in_mask ) {
2015-12-22 18:25:26 +05:30
for ( i = 0 ; i < chip - > num_type_reg ; + + i ) {
reg = chip - > type_base +
( i * map - > reg_stride * d - > type_reg_stride ) ;
2018-12-18 12:58:13 +02:00
ret = regmap_read ( map , reg , & d - > type_buf_def [ i ] ) ;
if ( d - > chip - > type_invert )
d - > type_buf_def [ i ] = ~ d - > type_buf_def [ i ] ;
if ( ret ) {
dev_err ( map - > dev , " Failed to get type defaults at 0x%x: %d \n " ,
2015-12-22 18:25:26 +05:30
reg , ret ) ;
goto err_alloc ;
}
}
}
2012-05-13 10:59:56 +01:00
if ( irq_base )
2020-04-02 22:36:44 +02:00
d - > domain = irq_domain_add_legacy ( np , chip - > num_irqs , irq_base ,
0 , & regmap_domain_ops , d ) ;
2012-05-13 10:59:56 +01:00
else
2020-04-02 22:36:44 +02:00
d - > domain = irq_domain_add_linear ( np , chip - > num_irqs ,
2012-05-13 10:59:56 +01:00
& regmap_domain_ops , d ) ;
if ( ! d - > domain ) {
dev_err ( map - > dev , " Failed to create IRQ domain \n " ) ;
ret = - ENOMEM ;
goto err_alloc ;
2011-10-28 23:50:49 +02:00
}
2015-02-11 16:37:57 +01:00
ret = request_threaded_irq ( irq , NULL , regmap_irq_thread ,
irq_flags | IRQF_ONESHOT ,
2011-10-28 23:50:49 +02:00
chip - > name , d ) ;
if ( ret ! = 0 ) {
2013-03-19 10:45:04 +00:00
dev_err ( map - > dev , " Failed to request IRQ %d for %s: %d \n " ,
irq , chip - > name , ret ) ;
2012-05-13 10:59:56 +01:00
goto err_domain ;
2011-10-28 23:50:49 +02:00
}
2014-03-13 09:06:01 +01:00
* data = d ;
2011-10-28 23:50:49 +02:00
return 0 ;
2012-05-13 10:59:56 +01:00
err_domain :
/* Should really dispose of the domain but... */
2011-10-28 23:50:49 +02:00
err_alloc :
2015-12-22 18:25:26 +05:30
kfree ( d - > type_buf ) ;
kfree ( d - > type_buf_def ) ;
2012-06-05 14:34:03 +01:00
kfree ( d - > wake_buf ) ;
2011-10-28 23:50:49 +02:00
kfree ( d - > mask_buf_def ) ;
kfree ( d - > mask_buf ) ;
kfree ( d - > status_buf ) ;
2013-01-03 14:27:15 +00:00
kfree ( d - > status_reg_buf ) ;
2011-10-28 23:50:49 +02:00
kfree ( d ) ;
return ret ;
}
2020-04-02 22:36:44 +02:00
EXPORT_SYMBOL_GPL ( regmap_add_irq_chip_np ) ;
/**
* regmap_add_irq_chip ( ) - Use standard regmap IRQ controller handling
*
* @ map : The regmap for the device .
* @ irq : The IRQ the device uses to signal interrupts .
* @ irq_flags : The IRQF_ flags to use for the primary interrupt .
* @ irq_base : Allocate at specific IRQ number if irq_base > 0.
* @ chip : Configuration for the interrupt controller .
* @ data : Runtime data structure for the controller , allocated on success .
*
* Returns 0 on success or an errno on failure .
*
* This is the same as regmap_add_irq_chip_np , except that the device
* node of the regmap is used .
*/
int regmap_add_irq_chip ( struct regmap * map , int irq , int irq_flags ,
int irq_base , const struct regmap_irq_chip * chip ,
struct regmap_irq_chip_data * * data )
{
return regmap_add_irq_chip_np ( map - > dev - > of_node , map , irq , irq_flags ,
irq_base , chip , data ) ;
}
2011-10-28 23:50:49 +02:00
EXPORT_SYMBOL_GPL ( regmap_add_irq_chip ) ;
/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	/* Stop the primary handler first so no new dispatches occur. */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
2011-12-05 16:10:15 +00:00
2016-02-10 14:29:50 +05:30
/* devres release callback: tear down the chip when the device unbinds. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}
/*
 * devres match callback: select the resource whose stored chip-data
 * pointer equals @data.  A NULL entry indicates misuse and is warned on.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **entry = res;

	if (WARN_ON(!entry || !*entry))
		return 0;

	return *entry == data;
}
/**
 * devm_regmap_add_irq_chip_np() - Resource managed regmap_add_irq_chip_np()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @np: The device_node where the IRQ domain should be added to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
2020-04-02 22:36:44 +02:00
int devm_regmap_add_irq_chip_np ( struct device * dev , struct device_node * np ,
struct regmap * map , int irq , int irq_flags ,
int irq_base ,
const struct regmap_irq_chip * chip ,
struct regmap_irq_chip_data * * data )
2016-02-10 14:29:50 +05:30
{
struct regmap_irq_chip_data * * ptr , * d ;
int ret ;
ptr = devres_alloc ( devm_regmap_irq_chip_release , sizeof ( * ptr ) ,
GFP_KERNEL ) ;
if ( ! ptr )
return - ENOMEM ;
2020-04-02 22:36:44 +02:00
ret = regmap_add_irq_chip_np ( np , map , irq , irq_flags , irq_base ,
chip , & d ) ;
2016-02-10 14:29:50 +05:30
if ( ret < 0 ) {
devres_free ( ptr ) ;
return ret ;
}
* ptr = d ;
devres_add ( dev , ptr ) ;
* data = d ;
return 0 ;
}
2020-04-02 22:36:44 +02:00
EXPORT_SYMBOL_GPL ( devm_regmap_add_irq_chip_np ) ;
/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
*
* @ dev : The device pointer on which irq_chip belongs to .
* @ map : The regmap for the device .
* @ irq : The IRQ the device uses to signal interrupts
* @ irq_flags : The IRQF_ flags to use for the primary interrupt .
* @ irq_base : Allocate at specific IRQ number if irq_base > 0.
* @ chip : Configuration for the interrupt controller .
* @ data : Runtime data structure for the controller , allocated on success
*
* Returns 0 on success or an errno on failure .
*
* The & regmap_irq_chip_data will be automatically released when the device is
* unbound .
*/
int devm_regmap_add_irq_chip ( struct device * dev , struct regmap * map , int irq ,
int irq_flags , int irq_base ,
const struct regmap_irq_chip * chip ,
struct regmap_irq_chip_data * * data )
{
return devm_regmap_add_irq_chip_np ( dev , map - > dev - > of_node , map , irq ,
irq_flags , irq_base , chip , data ) ;
}
2016-02-10 14:29:50 +05:30
EXPORT_SYMBOL_GPL ( devm_regmap_add_irq_chip ) ;
/**
2017-01-12 11:17:39 +00:00
* devm_regmap_del_irq_chip ( ) - Resource managed regmap_del_irq_chip ( )
2016-02-10 14:29:50 +05:30
*
 * @dev: Device for which the resource was allocated.
2017-01-12 11:17:39 +00:00
* @ irq : Primary IRQ for the device .
* @ data : & regmap_irq_chip_data allocated by regmap_add_irq_chip ( ) .
*
* A resource managed version of regmap_del_irq_chip ( ) .
2016-02-10 14:29:50 +05:30
*/
void devm_regmap_del_irq_chip ( struct device * dev , int irq ,
struct regmap_irq_chip_data * data )
{
int rc ;
WARN_ON ( irq ! = data - > irq ) ;
rc = devres_release ( dev , devm_regmap_irq_chip_release ,
devm_regmap_irq_chip_match , data ) ;
if ( rc ! = 0 )
WARN_ON ( rc ) ;
}
EXPORT_SYMBOL_GPL ( devm_regmap_del_irq_chip ) ;
2011-12-05 16:10:15 +00:00
/**
2017-01-12 11:17:39 +00:00
* regmap_irq_chip_get_base ( ) - Retrieve interrupt base for a regmap IRQ chip
2011-12-05 16:10:15 +00:00
*
2017-01-12 11:17:39 +00:00
* @ data : regmap irq controller to operate on .
2011-12-05 16:10:15 +00:00
*
2017-01-12 11:17:39 +00:00
* Useful for drivers to request their own IRQs .
2011-12-05 16:10:15 +00:00
*/
int regmap_irq_chip_get_base ( struct regmap_irq_chip_data * data )
{
2012-05-13 10:59:56 +01:00
WARN_ON ( ! data - > irq_base ) ;
2011-12-05 16:10:15 +00:00
return data - > irq_base ;
}
EXPORT_SYMBOL_GPL ( regmap_irq_chip_get_base ) ;
2012-05-13 10:59:56 +01:00
/**
2017-01-12 11:17:39 +00:00
* regmap_irq_get_virq ( ) - Map an interrupt on a chip to a virtual IRQ
2012-05-13 10:59:56 +01:00
*
2017-01-12 11:17:39 +00:00
* @ data : regmap irq controller to operate on .
* @ irq : index of the interrupt requested in the chip IRQs .
2012-05-13 10:59:56 +01:00
*
2017-01-12 11:17:39 +00:00
* Useful for drivers to request their own IRQs .
2012-05-13 10:59:56 +01:00
*/
int regmap_irq_get_virq ( struct regmap_irq_chip_data * data , int irq )
{
2012-06-05 14:29:36 +01:00
/* Handle holes in the IRQ list */
if ( ! data - > chip - > irqs [ irq ] . mask )
return - EINVAL ;
2012-05-13 10:59:56 +01:00
return irq_create_mapping ( data - > domain , irq ) ;
}
EXPORT_SYMBOL_GPL ( regmap_irq_get_virq ) ;
2012-08-20 21:45:05 +01:00
/**
2017-01-12 11:17:39 +00:00
* regmap_irq_get_domain ( ) - Retrieve the irq_domain for the chip
*
* @ data : regmap_irq controller to operate on .
2012-08-20 21:45:05 +01:00
*
* Useful for drivers to request their own IRQs and for integration
* with subsystems . For ease of integration NULL is accepted as a
* domain , allowing devices to just call this even if no domain is
* allocated .
*/
struct irq_domain * regmap_irq_get_domain ( struct regmap_irq_chip_data * data )
{
if ( data )
return data - > domain ;
else
return NULL ;
}
EXPORT_SYMBOL_GPL ( regmap_irq_get_domain ) ;