/*
 *  linux/arch/arm/common/gic.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>

#include "irqchip.h"

union gic_base {
	void __iomem *common_base;
	void __percpu __iomem **percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

/*
 * Supported arch specific GIC irq extension.
 * Default make them NULL.
 */
struct irq_chip gic_arch_extn = {
	.irq_eoi = NULL,
	.irq_mask = NULL,
	.irq_unmask = NULL,
	.irq_retrigger = NULL,
	.irq_set_type = NULL,
	.irq_set_wake = NULL,
};

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return *__this_cpu_ptr(base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_mask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
	if (gic_arch_extn.irq_mask)
		gic_arch_extn.irq_mask(d);
	raw_spin_unlock(&irq_controller_lock);
}

static void gic_unmask_irq(struct irq_data *d)
{
	u32 mask = 1 << (gic_irq(d) % 32);

	raw_spin_lock(&irq_controller_lock);
	if (gic_arch_extn.irq_unmask)
		gic_arch_extn.irq_unmask(d);
	writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
	raw_spin_unlock(&irq_controller_lock);
}
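
/*
 * Signal end-of-interrupt to the CPU interface, after giving any
 * registered gic_arch_extn hook a chance to run its own EOI handling.
 */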
static void gic_eoi_irq(struct irq_data *d)
{
	if (gic_arch_extn.irq_eoi) {
		raw_spin_lock(&irq_controller_lock);
		gic_arch_extn.irq_eoi(d);
		raw_spin_unlock(&irq_controller_lock);
	}

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
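
/*
 * Configure the trigger type (level-high or rising-edge) of an interrupt
 * in the distributor.  SGI configuration cannot be changed, and the
 * interrupt is briefly disabled around the update, as the spec recommends.
 */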
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);
	u32 enablemask = 1 << (gicirq % 32);
	u32 enableoff = (gicirq / 32) * 4;
	u32 confmask = 0x2 << ((gicirq % 16) * 2);
	u32 confoff = (gicirq / 16) * 4;
	bool enabled = false;
	u32 val;

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	raw_spin_lock(&irq_controller_lock);

	if (gic_arch_extn.irq_set_type)
		gic_arch_extn.irq_set_type(d, type);

	val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val &= ~confmask;
	else if (type == IRQ_TYPE_EDGE_RISING)
		val |= confmask;

	/*
	 * As recommended by the spec, disable the interrupt before changing
	 * the configuration
	 */
	if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
		enabled = true;
	}

	writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);

	if (enabled)
		writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);

	raw_spin_unlock(&irq_controller_lock);

	return 0;
}

static int gic_retrigger(struct irq_data *d)
{
	if (gic_arch_extn.irq_retrigger)
		return gic_arch_extn.irq_retrigger(d);

	/* the genirq layer expects 0 if we can't retrigger in hardware */
	return 0;
}

#ifdef CONFIG_SMP
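
/*
 * Route an SPI to a single CPU interface, using the per-CPU mask that
 * the GIC reported for the chosen target CPU.
 */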
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int shift = (gic_irq(d) % 4) * 8;
	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
	u32 val, mask, bit;

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;

	raw_spin_lock(&irq_controller_lock);
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock(&irq_controller_lock);

	return IRQ_SET_MASK_OK;
}
#endif

#ifdef CONFIG_PM
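
/*
 * Wake-up configuration is delegated entirely to the platform's
 * gic_arch_extn hook, if one is registered.
 */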
static int gic_set_wake(struct irq_data *d, unsigned int on)
{
	int ret = -ENXIO;

	if (gic_arch_extn.irq_set_wake)
		ret = gic_arch_extn.irq_set_wake(d, on);

	return ret;
}

#else
#define gic_set_wake	NULL
#endif
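
/*
 * Top-level handler for the primary GIC: acknowledge interrupts from the
 * CPU interface and dispatch them until a spurious ID is read.  IDs 0-15
 * are IPIs, IDs 16-1020 are looked up in the irq domain.
 */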
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & ~0x1c00;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			irqnr = irq_find_mapping(gic->domain, irqnr);
			handle_IRQ(irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}
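
/*
 * Chained handler for a secondary GIC cascaded into a parent interrupt:
 * acknowledge the interrupt on the secondary GIC and forward it to the
 * Linux interrupt mapped in that GIC's domain.
 */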
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_get_handler_data(irq);
	struct irq_chip *chip = irq_get_chip(irq);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & 0x3ff);
	if (gic_irq == 1023)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(cascade_irq, desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_retrigger		= gic_retrigger,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_set_wake		= gic_set_wake,
};
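
/*
 * Register a secondary GIC behind the given parent interrupt using the
 * chained handler above.
 */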
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	if (irq_set_handler_data(irq, &gic_data[gic_nr]) != 0)
		BUG();
	irq_set_chained_handler(irq, gic_handle_cascade_irq);
}
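
/*
 * Read back the CPU interface mask the GIC uses for this CPU, by scanning
 * the banked GIC_DIST_TARGET registers of the private interrupts.
 */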
static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}
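
/*
 * One-time distributor setup: route all global interrupts to this CPU,
 * give them a default trigger type and priority, and leave them disabled
 * before turning the distributor back on.
 */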
static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(0, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to be level triggered, active low.
	 */
	for (i = 32; i < gic_irqs; i += 16)
		writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	/*
	 * Set priority on all global interrupts.
	 */
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);

	/*
	 * Disable all interrupts.  Leave the PPI and SGIs alone
	 * as these enables are banked registers.
	 */
	for (i = 32; i < gic_irqs; i += 32)
		writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);

	writel_relaxed(1, base + GIC_DIST_CTRL);
}
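
/*
 * Per-CPU initialisation: record this CPU's interface mask, set up the
 * banked PPI/SGI enables and priorities, and enable the CPU interface.
 */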
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Get what the GIC says our CPU mask is.
	 */
	BUG_ON(cpu >= NR_GIC_CPU_IF);
	cpu_mask = gic_get_cpumask(gic);
	gic_cpu_map[cpu] = cpu_mask;

	/*
	 * Clear our mask from the other map entries in case they're
	 * still undefined.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		if (i != cpu)
			gic_cpu_map[i] &= ~cpu_mask;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.
	 */
	writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
	writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);

	/*
	 * Set priority on PPI and SGI interrupts
	 */
	for (i = 0; i < 32; i += 4)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);

	writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
	writel_relaxed(1, base + GIC_CPU_CTRL);
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(0, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(0xa0a0a0a0,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(1, dist_base + GIC_DIST_CTRL);
}
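
/*
 * Save the banked per-cpu PPI/SGI enable and configuration registers
 * before the CPU (or the GIC) is powered down.
 */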
static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}
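
/*
 * Restore the banked per-cpu PPI/SGI state saved by gic_cpu_save() and
 * re-enable the CPU interface.
 */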
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
	writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
}
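
/*
 * CPU PM notifier: save/restore the banked CPU interface state around
 * CPU low-power entry, and the distributor state around cluster
 * low-power entry.
 */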
static int gic_notifier(struct notifier_block *self, unsigned long cmd,
			void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};
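
/*
 * Allocate the per-cpu save areas and register the CPU PM notifier for
 * the primary GIC.
 */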
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
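
/*
 * Send a software-generated interrupt (IPI) to the CPUs in @mask,
 * translating the logical mask into GIC CPU interface numbers first.
 */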
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
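
/*
 * Set up a newly mapped interrupt: per-cpu handling for SGIs/PPIs
 * (hwirq < 32), fasteoi handling for SPIs.
 */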
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
				irq_hw_number_t hw)
{
	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_percpu_devid_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
	} else {
		irq_set_chip_and_handler(irq, &gic_chip,
					 handle_fasteoi_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}
	irq_set_chip_data(irq, d->host_data);
	return 0;
}
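
/*
 * Translate a device tree interrupt specifier (type, number, flags) into
 * a GIC hardware interrupt number and trigger type.
 */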
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

#ifdef CONFIG_SMP
static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block __cpuinitdata gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};
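
/*
 * Probe and initialise a GIC instance: set up the register accessors,
 * work out how many interrupts it supports, create its irq domain and
 * bring up the distributor, CPU interface and PM hooks.
 */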
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			unsigned long offset = percpu_offset * cpu_logical_map(cpu);
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{ /* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Initialize the CPU interface map to all CPUs.
	 * It will be refined as each CPU probes its ID.
	 */
	for (i = 0; i < NR_GIC_CPU_IF; i++)
		gic_cpu_map[i] = 0xff;

	/*
	 * For primary GICs, skip over SGIs.
	 * For secondary GICs, skip over PPIs, too.
	 */
	if (gic_nr == 0 && (irq_start & 31) > 0) {
		hwirq_base = 16;
		if (irq_start != -1)
			irq_start = (irq_start & ~31) + 16;
	} else {
		hwirq_base = 32;
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
	irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
	if (IS_ERR_VALUE(irq_base)) {
		WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
		     irq_start);
		irq_base = irq_start;
	}
	gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
				    hwirq_base, &gic_irq_domain_ops, gic);
	if (WARN_ON(!gic->domain))
		return;

#ifdef CONFIG_SMP
	set_smp_cross_call(gic_raise_softirq);
	register_cpu_notifier(&gic_cpu_notifier);
#endif

	set_handle_irq(gic_handle_irq);

	gic_chip.flags |= gic_arch_extn.flags;
	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;
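
/*
 * Device tree probe: map the distributor and CPU interface registers,
 * initialise the GIC and, for secondary GICs, cascade into the parent
 * interrupt controller.
 */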
int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}
	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
#endif