2005-08-19 00:31:00 +04:00
/*
* Copyright ( C ) 2002 ARM Limited , All Rights Reserved .
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*
* Interrupt architecture for the GIC :
*
* o There is one Interrupt Distributor , which receives interrupts
* from system devices and sends them to the Interrupt Controllers .
*
* o There is one CPU Interface per CPU , which sends interrupts sent
* by the Distributor , and interrupts generated locally , to the
2007-02-14 21:14:56 +03:00
* associated CPU . The base address of the CPU interface is usually
* aliased so that the same address points to different chips depending
* on the CPU it is accessed from .
2005-08-19 00:31:00 +04:00
*
* Note that IRQs 0 - 31 are special - they are local to each CPU .
* As such , the enable set / clear , pending set / clear and active bit
* registers are banked per - cpu for these sources .
*/
# include <linux/init.h>
# include <linux/kernel.h>
2011-10-22 02:14:27 +04:00
# include <linux/err.h>
2011-11-01 03:28:37 +04:00
# include <linux/module.h>
2005-08-19 00:31:00 +04:00
# include <linux/list.h>
# include <linux/smp.h>
2013-01-14 22:05:37 +04:00
# include <linux/cpu.h>
2011-02-10 23:54:10 +03:00
# include <linux/cpu_pm.h>
2005-09-01 00:45:14 +04:00
# include <linux/cpumask.h>
2008-09-06 15:10:45 +04:00
# include <linux/io.h>
2011-09-29 06:27:52 +04:00
# include <linux/of.h>
# include <linux/of_address.h>
# include <linux/of_irq.h>
2011-09-29 06:25:31 +04:00
# include <linux/irqdomain.h>
2011-07-20 19:24:14 +04:00
# include <linux/interrupt.h>
# include <linux/percpu.h>
# include <linux/slab.h>
2013-01-18 19:31:37 +04:00
# include <linux/irqchip/chained_irq.h>
2012-12-27 23:10:24 +04:00
# include <linux/irqchip/arm-gic.h>
2005-08-19 00:31:00 +04:00
2014-07-17 19:23:44 +04:00
# include <asm/cputype.h>
2005-08-19 00:31:00 +04:00
# include <asm/irq.h>
2011-09-06 12:56:17 +04:00
# include <asm/exception.h>
2012-01-20 15:01:12 +04:00
# include <asm/smp_plat.h>
2005-08-19 00:31:00 +04:00
2014-06-30 19:01:30 +04:00
# include "irq-gic-common.h"
2012-11-21 07:21:40 +04:00
# include "irqchip.h"
2005-08-19 00:31:00 +04:00
2011-11-12 20:09:49 +04:00
/*
 * Base address of a GIC register bank: a single shared mapping on sane
 * hardware, or one mapping per CPU when the same registers appear at
 * different addresses per CPU (CONFIG_GIC_NON_BANKED).
 */
union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};
/* Per-GIC-instance state: register bases, IRQ count and PM save areas. */
struct gic_chip_data {
	union gic_base dist_base;	/* Distributor registers */
	union gic_base cpu_base;	/* CPU interface registers */
#ifdef CONFIG_CPU_PM
	/* Shared (SPI) state saved across power transitions; the GIC
	 * supports at most 1020 interrupt sources. */
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	/* PPI/SGI registers are banked per CPU, hence per-CPU save areas. */
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;	/* Linux irq <-> hwirq mapping */
	unsigned int gic_irqs;		/* number of interrupts supported */
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);	/* base resolver */
#endif
};
2009-07-03 17:44:46 +04:00
static DEFINE_RAW_SPINLOCK ( irq_controller_lock ) ;
2005-08-19 00:31:00 +04:00
2012-04-12 02:55:48 +04:00
/*
* The GIC mapping of CPU interfaces does not necessarily match
* the logical CPU numbering . Let ' s use a mapping as returned
* by the GIC itself .
*/
#define NR_GIC_CPU_IF 8
/* For each logical CPU: bitmask of the GIC CPU interface backing it. */
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
2011-03-02 10:03:22 +03:00
/*
* Supported arch specific GIC irq extension .
* Default make them NULL .
*/
/*
 * Optional platform hooks called alongside the matching GIC operation;
 * all default to NULL (no extension installed).
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi	= NULL,
	.irq_mask	= NULL,
	.irq_unmask	= NULL,
	.irq_retrigger	= NULL,
	.irq_set_type	= NULL,
	.irq_set_wake	= NULL,
};
2007-02-14 21:14:56 +03:00
#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

/* State for every GIC instance present in the system. */
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
2007-02-14 21:14:56 +03:00
2011-11-12 20:09:49 +04:00
#ifdef CONFIG_GIC_NON_BANKED
/* Non-banked GIC: resolve the mapping that belongs to the current CPU. */
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

/* Banked GIC: a single mapping is shared by all CPUs. */
static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
/* Only banked GICs are possible: no indirection needed at all. */
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif
2010-11-29 12:18:20 +03:00
/* Distributor register base for the GIC that owns interrupt @d. */
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	return gic_data_dist_base(gic);
}
2010-11-29 12:18:20 +03:00
/* CPU interface register base for the GIC that owns interrupt @d. */
static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);

	return gic_data_cpu_base(gic);
}
2010-11-29 12:18:20 +03:00
static inline unsigned int gic_irq ( struct irq_data * d )
2007-02-14 21:14:56 +03:00
{
2011-09-29 06:25:31 +04:00
return d - > hwirq ;
2007-02-14 21:14:56 +03:00
}
2005-08-19 00:31:00 +04:00
/*
* Routines to acknowledge , disable and enable interrupts
*/
2010-11-29 12:18:20 +03:00
static void gic_mask_irq ( struct irq_data * d )
2005-08-19 00:31:00 +04:00
{
2011-09-29 06:25:31 +04:00
u32 mask = 1 < < ( gic_irq ( d ) % 32 ) ;
2006-07-02 01:32:14 +04:00
2009-07-03 17:44:46 +04:00
raw_spin_lock ( & irq_controller_lock ) ;
2011-03-28 17:57:46 +04:00
writel_relaxed ( mask , gic_dist_base ( d ) + GIC_DIST_ENABLE_CLEAR + ( gic_irq ( d ) / 32 ) * 4 ) ;
2011-03-02 10:03:22 +03:00
if ( gic_arch_extn . irq_mask )
gic_arch_extn . irq_mask ( d ) ;
2009-07-03 17:44:46 +04:00
raw_spin_unlock ( & irq_controller_lock ) ;
2005-08-19 00:31:00 +04:00
}
2010-11-29 12:18:20 +03:00
static void gic_unmask_irq ( struct irq_data * d )
2005-08-19 00:31:00 +04:00
{
2011-09-29 06:25:31 +04:00
u32 mask = 1 < < ( gic_irq ( d ) % 32 ) ;
2006-07-02 01:32:14 +04:00
2009-07-03 17:44:46 +04:00
raw_spin_lock ( & irq_controller_lock ) ;
2011-03-02 10:03:22 +03:00
if ( gic_arch_extn . irq_unmask )
gic_arch_extn . irq_unmask ( d ) ;
2011-03-28 17:57:46 +04:00
writel_relaxed ( mask , gic_dist_base ( d ) + GIC_DIST_ENABLE_SET + ( gic_irq ( d ) / 32 ) * 4 ) ;
2009-07-03 17:44:46 +04:00
raw_spin_unlock ( & irq_controller_lock ) ;
2005-08-19 00:31:00 +04:00
}
2011-02-09 15:01:12 +03:00
/*
 * Signal end-of-interrupt for @d. The arch extension hook (if any) runs
 * first, under the controller lock; the EOI register write itself is
 * per-CPU and needs no locking.
 */
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
2010-11-29 12:18:20 +03:00
/*
 * Configure the trigger type of an interrupt.
 *
 * SGIs (hwirq < 16) have a fixed configuration, and the GIC only
 * supports high-level and rising-edge triggers, so anything else
 * is rejected with -EINVAL.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	/* Arch extension is notified first, under the same lock. */
	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	gic_configure_irq(gicirq, type, base, NULL);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}
2011-03-02 10:03:22 +03:00
/* Hardware retrigger is only possible through the arch extension. */
static int gic_retrigger(struct irq_data *d)
{
	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return gic_arch_extn.irq_retrigger ?
		gic_arch_extn.irq_retrigger(d) : 0;
}
2005-09-30 19:07:05 +04:00
# ifdef CONFIG_SMP
2011-01-23 15:12:01 +03:00
static int gic_set_affinity ( struct irq_data * d , const struct cpumask * mask_val ,
bool force )
2005-08-19 00:31:00 +04:00
{
2010-11-29 12:18:20 +03:00
void __iomem * reg = gic_dist_base ( d ) + GIC_DIST_TARGET + ( gic_irq ( d ) & ~ 3 ) ;
2014-04-16 18:36:44 +04:00
unsigned int cpu , shift = ( gic_irq ( d ) % 4 ) * 8 ;
2011-01-23 15:12:01 +03:00
u32 val , mask , bit ;
2005-08-19 00:31:00 +04:00
2014-04-16 18:36:44 +04:00
if ( ! force )
cpu = cpumask_any_and ( mask_val , cpu_online_mask ) ;
else
cpu = cpumask_first ( mask_val ) ;
2012-04-12 02:55:48 +04:00
if ( cpu > = NR_GIC_CPU_IF | | cpu > = nr_cpu_ids )
2010-12-06 09:01:10 +03:00
return - EINVAL ;
2011-01-23 15:12:01 +03:00
2012-04-12 09:40:31 +04:00
raw_spin_lock ( & irq_controller_lock ) ;
2011-01-23 15:12:01 +03:00
mask = 0xff < < shift ;
2012-04-12 02:55:48 +04:00
bit = gic_cpu_map [ cpu ] < < shift ;
2011-03-28 17:57:46 +04:00
val = readl_relaxed ( reg ) & ~ mask ;
writel_relaxed ( val | bit , reg ) ;
2009-07-03 17:44:46 +04:00
raw_spin_unlock ( & irq_controller_lock ) ;
2009-04-28 04:59:21 +04:00
2011-07-21 18:00:57 +04:00
return IRQ_SET_MASK_OK ;
2005-08-19 00:31:00 +04:00
}
2005-09-30 19:07:05 +04:00
# endif
2005-08-19 00:31:00 +04:00
2011-03-02 10:03:22 +03:00
#ifdef CONFIG_PM
/* Wake-up configuration is delegated entirely to the arch extension. */
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	if (gic_arch_extn.irq_set_wake)
		return gic_arch_extn.irq_set_wake(d, on);

	return -ENXIO;
}
#else
#define gic_set_wake	NULL
#endif
2014-03-05 04:40:30 +04:00
/*
 * Low-level IRQ entry point: repeatedly acknowledge and dispatch
 * pending interrupts from the primary GIC's CPU interface until a
 * spurious interrupt ID is read back.
 */
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			/* PPI/SPI: dispatch through the irq domain. */
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			/*
			 * SGI: EOI with the full IAR value, which also
			 * encodes the source CPU interface.
			 */
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		/* IDs 1021-1023 are spurious: nothing left pending. */
		break;
	} while (1);
}
2007-05-17 13:11:34 +04:00
/*
 * Chained handler used when a secondary GIC is cascaded behind a parent
 * interrupt: acknowledge the child GIC and dispatch the resulting IRQ
 * through its domain.
 */
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	/* Only SPIs (32..1020) are valid through a cascade. */
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}
2006-08-02 01:26:25 +04:00
/* irq_chip callbacks for all interrupts owned by this driver. */
static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
2007-02-14 21:14:56 +03:00
/*
 * Register @irq as the parent interrupt through which GIC @gic_nr is
 * cascaded. An invalid GIC number or a failure to attach the handler
 * data is fatal at boot.
 */
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
2013-01-31 03:49:57 +04:00
static u8 gic_get_cpumask ( struct gic_chip_data * gic )
{
void __iomem * base = gic_data_dist_base ( gic ) ;
u32 mask , i ;
for ( i = mask = 0 ; i < 32 ; i + = 4 ) {
mask = readl_relaxed ( base + GIC_DIST_TARGET + i ) ;
mask | = mask > > 16 ;
mask | = mask > > 8 ;
if ( mask )
break ;
}
if ( ! mask )
pr_crit ( " GIC CPU mask not found - kernel will fail to boot. \n " ) ;
return mask ;
}
2014-07-31 01:56:59 +04:00
/* Enable the primary GIC's CPU interface, keeping bypass bits intact. */
static void gic_cpu_if_up(void)
{
	void __iomem *cpu_base = gic_data_cpu_base(&gic_data[0]);
	u32 bypass = 0;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
2011-09-29 06:25:31 +04:00
/*
 * One-time distributor initialisation: disable it, route every global
 * (SPI) interrupt to the boot CPU, apply the common default
 * configuration and re-enable it.
 */
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	/* Replicate the 8-bit interface mask into all 4 target bytes. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}
2013-06-19 19:32:08 +04:00
/*
 * Per-CPU initialisation: discover this CPU's interface mask, scrub it
 * from the other map entries, configure the banked private interrupts
 * and bring the CPU interface up.
 */
static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}
2013-03-20 07:59:04 +04:00
void gic_cpu_if_down ( void )
{
void __iomem * cpu_base = gic_data_cpu_base ( & gic_data [ 0 ] ) ;
2014-07-31 01:56:59 +04:00
u32 val = 0 ;
val = readl ( cpu_base + GIC_CPU_CTRL ) ;
val & = ~ GICC_ENABLE ;
writel_relaxed ( val , cpu_base + GIC_CPU_CTRL ) ;
2013-03-20 07:59:04 +04:00
}
2011-02-10 23:54:10 +03:00
# ifdef CONFIG_CPU_PM
/*
* Saves the GIC distributor registers during suspend or idle . Must be called
* with interrupts disabled but before powering down the GIC . After calling
* this function , no interrupts will be delivered by the GIC , and another
* platform - specific wakeup source must be enabled .
*/
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	/* Save trigger configuration, routing and enable state of all SPIs. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}
/*
* Restores the GIC distributor registers during resume or when coming out of
* idle . Must be called before enabling interrupts . If a level interrupt
* that occured while the GIC was suspended is still present , it will be
* handled normally , but any edge interrupts that occured will not be seen by
* the GIC and need to be handled by the platform - specific wakeup source .
*/
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	/* Keep the distributor off while its registers are rewritten. */
	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	/* Priorities are not saved across suspend; reinstate the defaults. */
	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}
/* Save this CPU's banked PPI/SGI enable and configuration state. */
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	/* Only the first 32 interrupts (SGI+PPI) are banked per CPU. */
	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
/* Restore this CPU's banked PPI/SGI state and re-enable its interface. */
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	/* Private interrupt priorities are not saved; use the defaults. */
	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
					dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up();
}
/*
 * CPU PM notifier: save/restore per-CPU GIC state on CPU power
 * transitions and distributor state on cluster power transitions.
 */
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}
/* Registered with the CPU PM framework by gic_pm_init(). */
static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
/*
 * Allocate the per-CPU PPI/SGI save areas and, for the primary GIC
 * only, register the CPU PM notifier. Allocation failure is fatal.
 */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
/* No CPU PM support: nothing to save or restore. */
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
2012-11-27 01:05:48 +04:00
# ifdef CONFIG_SMP
2014-03-05 05:02:01 +04:00
static void gic_raise_softirq ( const struct cpumask * mask , unsigned int irq )
2012-11-27 01:05:48 +04:00
{
int cpu ;
2012-04-12 09:40:31 +04:00
unsigned long flags , map = 0 ;
raw_spin_lock_irqsave ( & irq_controller_lock , flags ) ;
2012-11-27 01:05:48 +04:00
/* Convert our logical CPU mask into a physical one. */
for_each_cpu ( cpu , mask )
2013-02-19 17:52:22 +04:00
map | = gic_cpu_map [ cpu ] ;
2012-11-27 01:05:48 +04:00
/*
* Ensure that stores to Normal memory are visible to the
2014-02-20 21:42:07 +04:00
* other CPUs before they observe us issuing the IPI .
2012-11-27 01:05:48 +04:00
*/
2014-02-20 21:42:07 +04:00
dmb ( ishst ) ;
2012-11-27 01:05:48 +04:00
/* this always happens on GIC0 */
writel_relaxed ( map < < 16 | irq , gic_data_dist_base ( & gic_data [ 0 ] ) + GIC_DIST_SOFTINT ) ;
2012-04-12 09:40:31 +04:00
raw_spin_unlock_irqrestore ( & irq_controller_lock , flags ) ;
}
# endif
# ifdef CONFIG_BL_SWITCHER
2012-11-29 03:48:19 +04:00
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	unsigned int target;

	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	target = 1U << cpu_id;

	/* this always happens on GIC0 */
	writel_relaxed((target << 16) | irq,
		       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
2012-07-06 05:33:26 +04:00
/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	/* More than one interface bit set: the mapping is ambiguous. */
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}
2012-04-12 09:40:31 +04:00
/*
* gic_migrate_target - migrate IRQs to another CPU interface
*
* @ new_cpu_id : the CPU target ID to migrate IRQs to
*
* Migrate all peripheral interrupts with a target matching the current CPU
* to the interface corresponding to @ new_cpu_id . The CPU interface mapping
* is also updated . Targets to other CPU interfaces are unchanged .
* This must be called with IRQs locally disabled .
*/
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;

	gic_irqs = gic_data[gic_nr].gic_irqs;
	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	/* One target byte per interrupt, so replicate across the word. */
	cur_target_mask = 0x01010101 << cur_cpu_id;
	/* Rotation amount that moves the old target bit onto the new one. */
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targetting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		/* Re-raise each pending SGI at the new CPU interface. */
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}
2012-11-29 03:17:25 +04:00
/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}
/* Record the distributor's physical address from the device tree node. */
void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;

	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node) do { } while (0)
#endif
2012-02-15 01:06:57 +04:00
/*
 * Set up a newly mapped interrupt: per-CPU devid handling for private
 * interrupts (hwirq < 32), fasteoi handling for shared ones.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);

		/* Let a routable-IRQ platform hook into the mapping. */
		gic_routable_irq_domain_ops->map(d, irq, hw);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
2013-12-03 14:27:22 +04:00
/* Undo any routable-IRQ work done at map time. */
static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
	gic_routable_irq_domain_ops->unmap(d, irq);
}
2012-02-15 01:06:48 +04:00
/*
 * Translate a 3-cell devicetree interrupt specifier (<type number
 * flags>) into a hardware IRQ number and trigger type.
 */
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0]) {
		ret = gic_routable_irq_domain_ops->xlate(d, controller,
							 intspec,
							 intsize,
							 out_hwirq,
							 out_type);

		if (IS_ERR_VALUE(ret))
			return ret;
	}

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return ret;
}
2013-01-14 22:05:37 +04:00
# ifdef CONFIG_SMP
2013-06-19 19:32:08 +04:00
static int gic_secondary_init ( struct notifier_block * nfb , unsigned long action ,
void * hcpu )
2013-01-14 22:05:37 +04:00
{
2013-06-12 15:30:27 +04:00
if ( action = = CPU_STARTING | | action = = CPU_STARTING_FROZEN )
2013-01-14 22:05:37 +04:00
gic_cpu_init ( & gic_data [ 0 ] ) ;
return NOTIFY_OK ;
}
/*
* Notifier for enabling the GIC CPU interface . Set an arbitrarily high
* priority because the GIC needs to be up before the ARM generic timers .
*/
2013-06-19 19:32:08 +04:00
static struct notifier_block gic_cpu_notifier = {
2013-01-14 22:05:37 +04:00
. notifier_call = gic_secondary_init ,
. priority = 100 ,
} ;
# endif
2014-03-05 05:02:01 +04:00
/* Domain callbacks for the GIC's own irq_domain. */
static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};
2013-12-03 14:27:22 +04:00
/* Default functions for routable irq domain */
static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq,
				       irq_hw_number_t hw)
{
	return 0;
}

static void gic_routable_irq_domain_unmap(struct irq_domain *d,
					  unsigned int irq)
{
}

/* Default xlate: SPIs get a further offset of 16 over the PPI base. */
static int gic_routable_irq_domain_xlate(struct irq_domain *d,
					 struct device_node *controller,
					 const u32 *intspec, unsigned int intsize,
					 unsigned long *out_hwirq,
					 unsigned int *out_type)
{
	*out_hwirq += 16;
	return 0;
}

static const struct irq_domain_ops gic_default_routable_irq_domain_ops = {
	.map = gic_routable_irq_domain_map,
	.unmap = gic_routable_irq_domain_unmap,
	.xlate = gic_routable_irq_domain_xlate,
};

/* Platforms with routable SPIs override this pointer with their own ops. */
const struct irq_domain_ops *gic_routable_irq_domain_ops =
					&gic_default_routable_irq_domain_ops;
2011-11-12 20:09:49 +04:00
void __init gic_init_bases ( unsigned int gic_nr , int irq_start ,
void __iomem * dist_base , void __iomem * cpu_base ,
2012-02-15 01:06:57 +04:00
u32 percpu_offset , struct device_node * node )
2010-12-04 18:55:14 +03:00
{
2012-02-15 01:06:57 +04:00
irq_hw_number_t hwirq_base ;
2010-12-04 19:50:58 +03:00
struct gic_chip_data * gic ;
2012-04-12 02:55:48 +04:00
int gic_irqs , irq_base , i ;
2013-12-03 14:27:22 +04:00
int nr_routable_irqs ;
2010-12-04 19:50:58 +03:00
BUG_ON ( gic_nr > = MAX_GIC_NR ) ;
gic = & gic_data [ gic_nr ] ;
2011-11-12 20:09:49 +04:00
# ifdef CONFIG_GIC_NON_BANKED
if ( percpu_offset ) { /* Frankein-GIC without banked registers... */
unsigned int cpu ;
gic - > dist_base . percpu_base = alloc_percpu ( void __iomem * ) ;
gic - > cpu_base . percpu_base = alloc_percpu ( void __iomem * ) ;
if ( WARN_ON ( ! gic - > dist_base . percpu_base | |
! gic - > cpu_base . percpu_base ) ) {
free_percpu ( gic - > dist_base . percpu_base ) ;
free_percpu ( gic - > cpu_base . percpu_base ) ;
return ;
}
for_each_possible_cpu ( cpu ) {
2014-07-17 19:23:44 +04:00
u32 mpidr = cpu_logical_map ( cpu ) ;
u32 core_id = MPIDR_AFFINITY_LEVEL ( mpidr , 0 ) ;
unsigned long offset = percpu_offset * core_id ;
2011-11-12 20:09:49 +04:00
* per_cpu_ptr ( gic - > dist_base . percpu_base , cpu ) = dist_base + offset ;
* per_cpu_ptr ( gic - > cpu_base . percpu_base , cpu ) = cpu_base + offset ;
}
gic_set_base_accessor ( gic , gic_get_percpu_base ) ;
} else
# endif
{ /* Normal, sane GIC... */
WARN ( percpu_offset ,
" GIC_NON_BANKED not enabled, ignoring %08x offset! " ,
percpu_offset ) ;
gic - > dist_base . common_base = dist_base ;
gic - > cpu_base . common_base = cpu_base ;
gic_set_base_accessor ( gic , gic_get_common_base ) ;
}
2010-12-04 19:50:58 +03:00
2012-04-12 02:55:48 +04:00
/*
* Initialize the CPU interface map to all CPUs .
* It will be refined as each CPU probes its ID .
*/
for ( i = 0 ; i < NR_GIC_CPU_IF ; i + + )
gic_cpu_map [ i ] = 0xff ;
2011-09-29 06:25:31 +04:00
/*
* For primary GICs , skip over SGIs .
* For secondary GICs , skip over PPIs , too .
*/
2012-02-03 17:52:14 +04:00
if ( gic_nr = = 0 & & ( irq_start & 31 ) > 0 ) {
2012-03-30 03:53:48 +04:00
hwirq_base = 16 ;
2012-02-03 17:52:14 +04:00
if ( irq_start ! = - 1 )
irq_start = ( irq_start & ~ 31 ) + 16 ;
} else {
2012-03-30 03:53:48 +04:00
hwirq_base = 32 ;
2011-11-25 22:23:36 +04:00
}
2011-09-29 06:25:31 +04:00
/*
* Find out how many interrupts are supported .
* The GIC only supports up to 1020 interrupt sources .
*/
2011-11-12 20:09:49 +04:00
gic_irqs = readl_relaxed ( gic_data_dist_base ( gic ) + GIC_DIST_CTR ) & 0x1f ;
2011-09-29 06:25:31 +04:00
gic_irqs = ( gic_irqs + 1 ) * 32 ;
if ( gic_irqs > 1020 )
gic_irqs = 1020 ;
gic - > gic_irqs = gic_irqs ;
2012-02-15 01:06:57 +04:00
gic_irqs - = hwirq_base ; /* calculate # of irqs to allocate */
2013-12-03 14:27:22 +04:00
if ( of_property_read_u32 ( node , " arm,routable-irqs " ,
& nr_routable_irqs ) ) {
irq_base = irq_alloc_descs ( irq_start , 16 , gic_irqs ,
numa_node_id ( ) ) ;
if ( IS_ERR_VALUE ( irq_base ) ) {
WARN ( 1 , " Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated \n " ,
irq_start ) ;
irq_base = irq_start ;
}
gic - > domain = irq_domain_add_legacy ( node , gic_irqs , irq_base ,
hwirq_base , & gic_irq_domain_ops , gic ) ;
} else {
gic - > domain = irq_domain_add_linear ( node , nr_routable_irqs ,
& gic_irq_domain_ops ,
gic ) ;
2011-10-22 02:14:27 +04:00
}
2013-12-03 14:27:22 +04:00
2012-02-15 01:06:57 +04:00
if ( WARN_ON ( ! gic - > domain ) )
return ;
2010-12-04 19:50:58 +03:00
2013-11-28 18:21:40 +04:00
if ( gic_nr = = 0 ) {
2012-11-27 01:05:48 +04:00
# ifdef CONFIG_SMP
2013-11-28 18:21:40 +04:00
set_smp_cross_call ( gic_raise_softirq ) ;
register_cpu_notifier ( & gic_cpu_notifier ) ;
2012-11-27 01:05:48 +04:00
# endif
2013-11-28 18:21:40 +04:00
set_handle_irq ( gic_handle_irq ) ;
}
2012-11-03 21:59:51 +04:00
2011-06-13 04:45:59 +04:00
gic_chip . flags | = gic_arch_extn . flags ;
2011-09-29 06:25:31 +04:00
gic_dist_init ( gic ) ;
2010-12-04 19:50:58 +03:00
gic_cpu_init ( gic ) ;
2011-02-10 23:54:10 +03:00
gic_pm_init ( gic ) ;
2010-12-04 18:55:14 +03:00
}
#ifdef CONFIG_OF
/* Number of GIC instances initialised from DT so far; also the next
 * index into gic_data[] used by gic_of_init(). */
static int gic_cnt __initdata;
2014-03-05 05:02:01 +04:00
static int __init
gic_of_init ( struct device_node * node , struct device_node * parent )
2011-09-29 06:27:52 +04:00
{
void __iomem * cpu_base ;
void __iomem * dist_base ;
2011-11-12 20:09:49 +04:00
u32 percpu_offset ;
2011-09-29 06:27:52 +04:00
int irq ;
if ( WARN_ON ( ! node ) )
return - ENODEV ;
dist_base = of_iomap ( node , 0 ) ;
WARN ( ! dist_base , " unable to map gic dist registers \n " ) ;
cpu_base = of_iomap ( node , 1 ) ;
WARN ( ! cpu_base , " unable to map gic cpu registers \n " ) ;
2011-11-12 20:09:49 +04:00
if ( of_property_read_u32 ( node , " cpu-offset " , & percpu_offset ) )
percpu_offset = 0 ;
2012-02-15 01:06:57 +04:00
gic_init_bases ( gic_cnt , - 1 , dist_base , cpu_base , percpu_offset , node ) ;
2012-11-29 03:17:25 +04:00
if ( ! gic_cnt )
gic_init_physaddr ( node ) ;
2011-09-29 06:27:52 +04:00
if ( parent ) {
irq = irq_of_parse_and_map ( node , 0 ) ;
gic_cascade_irq ( gic_cnt , irq ) ;
}
gic_cnt + + ;
return 0 ;
}
/* All DT compatible strings served by this driver. */
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif /* CONFIG_OF */