2012-09-01 01:13:07 +04:00
/*
* This file is subject to the terms and conditions of the GNU General Public
* License . See the file " COPYING " in the main directory of this archive
* for more details .
*
* Copyright ( C ) 2008 Ralf Baechle ( ralf @ linux - mips . org )
* Copyright ( C ) 2012 MIPS Technologies , Inc . All rights reserved .
*/
2008-04-28 20:14:26 +04:00
# include <linux/bitmap.h>
2014-10-20 23:03:55 +04:00
# include <linux/clocksource.h>
2008-04-28 20:14:26 +04:00
# include <linux/init.h>
2014-09-19 01:47:24 +04:00
# include <linux/interrupt.h>
2014-10-20 23:03:55 +04:00
# include <linux/irq.h>
2015-07-08 00:11:46 +03:00
# include <linux/irqchip.h>
2014-11-12 22:43:38 +03:00
# include <linux/of_address.h>
2017-08-13 07:36:42 +03:00
# include <linux/percpu.h>
2014-09-19 01:47:24 +04:00
# include <linux/sched.h>
2009-06-19 17:05:26 +04:00
# include <linux/smp.h>
2008-04-28 20:14:26 +04:00
2017-08-13 05:49:41 +03:00
# include <asm/mips-cps.h>
2012-09-01 01:18:49 +04:00
# include <asm/setup.h>
# include <asm/traps.h>
2008-04-28 20:14:26 +04:00
2014-11-12 22:43:38 +03:00
# include <dt-bindings/interrupt-controller/mips-gic.h>
2017-08-13 07:36:29 +03:00
# define GIC_MAX_INTRS 256
2017-08-13 07:36:42 +03:00
# define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS)
2017-08-13 07:36:29 +03:00
/* Add 2 to convert GIC CPU pin to core interrupt */
# define GIC_CPU_PIN_OFFSET 2
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
# define GIC_PIN_TO_VEC_OFFSET 1
/* Convert between local/shared IRQ number and GIC HW IRQ number. */
# define GIC_LOCAL_HWIRQ_BASE 0
# define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
# define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
# define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
# define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
# define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
2017-08-13 07:36:10 +03:00
void __iomem * mips_gic_base ;
2012-09-01 01:18:49 +04:00
2017-08-13 07:36:42 +03:00
DEFINE_PER_CPU_READ_MOSTLY ( unsigned long [ GIC_MAX_LONGS ] , pcpu_masks ) ;
2014-07-17 12:20:53 +04:00
2014-09-19 01:47:21 +04:00
static DEFINE_SPINLOCK ( gic_lock ) ;
2014-09-19 01:47:23 +04:00
static struct irq_domain * gic_irq_domain ;
2015-12-08 16:20:23 +03:00
static struct irq_domain * gic_ipi_domain ;
2014-09-19 01:47:25 +04:00
static int gic_shared_intrs ;
2014-09-19 01:47:27 +04:00
static int gic_vpes ;
2014-09-19 01:47:28 +04:00
static unsigned int gic_cpu_pin ;
2015-01-19 18:38:24 +03:00
static unsigned int timer_cpu_pin ;
2014-09-19 01:47:26 +04:00
static struct irq_chip gic_level_irq_controller , gic_edge_irq_controller ;
2015-12-08 16:20:23 +03:00
DECLARE_BITMAP ( ipi_resrv , GIC_MAX_INTRS ) ;
2017-04-20 12:07:34 +03:00
DECLARE_BITMAP ( ipi_available , GIC_MAX_INTRS ) ;
2008-04-28 20:14:26 +04:00
2017-08-19 00:02:21 +03:00
/* Remove @intr from every possible CPU's pcpu_masks bitmap. */
static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}
2014-09-19 01:47:27 +04:00
static bool gic_local_irq_is_routable ( int intr )
{
u32 vpe_ctl ;
/* All local interrupts are routable in EIC mode. */
if ( cpu_has_veic )
return true ;
2017-08-13 07:36:26 +03:00
vpe_ctl = read_gic_vl_ctl ( ) ;
2014-09-19 01:47:27 +04:00
switch ( intr ) {
case GIC_LOCAL_INT_TIMER :
2017-08-13 07:36:26 +03:00
return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE ;
2014-09-19 01:47:27 +04:00
case GIC_LOCAL_INT_PERFCTR :
2017-08-13 07:36:26 +03:00
return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE ;
2014-09-19 01:47:27 +04:00
case GIC_LOCAL_INT_FDC :
2017-08-13 07:36:26 +03:00
return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE ;
2014-09-19 01:47:27 +04:00
case GIC_LOCAL_INT_SWINT0 :
case GIC_LOCAL_INT_SWINT1 :
2017-08-13 07:36:26 +03:00
return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE ;
2014-09-19 01:47:27 +04:00
default :
return true ;
}
}
2014-09-19 01:47:28 +04:00
static void gic_bind_eic_interrupt ( int irq , int set )
2012-09-01 01:18:49 +04:00
{
/* Convert irq vector # to hw int # */
irq - = GIC_PIN_TO_VEC_OFFSET ;
/* Set irq to use shadow set */
2017-08-13 07:36:26 +03:00
write_gic_vl_eic_shadow_set ( irq , set ) ;
2012-09-01 01:18:49 +04:00
}
2015-12-08 16:20:28 +03:00
static void gic_send_ipi ( struct irq_data * d , unsigned int cpu )
2008-04-28 20:14:26 +04:00
{
2015-12-08 16:20:28 +03:00
irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED ( irqd_to_hwirq ( d ) ) ;
2017-08-13 07:36:24 +03:00
write_gic_wedge ( GIC_WEDGE_RW | hwirq ) ;
2008-04-28 20:14:26 +04:00
}
2014-09-19 01:47:27 +04:00
/*
 * Return the virq to use for the CP0 count/compare timer interrupt,
 * falling back to the CPU IRQ it is hardwired to when the GIC cannot
 * re-route it.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
/*
 * Return the virq to use for the performance counter interrupt, or -1
 * when the counter shares its IRQ with the timer (cp0_perfcount_irq < 0)
 * and the GIC cannot re-route it.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;

		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
2015-01-29 14:14:09 +03:00
/*
 * Return the virq to use for the Fast Debug Channel interrupt, or -1
 * when no FDC IRQ exists (cp0_fdc_irq < 0) and the GIC cannot re-route.
 */
int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;

		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}
2015-06-12 11:01:56 +03:00
/*
 * Dispatch all pending shared interrupts that are routed to this CPU.
 * @chained selects generic_handle_irq() (chained handler context) vs
 * do_IRQ() (direct dispatch from the vectored interrupt handler).
 */
static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* This CPU's routing mask */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	/* Snapshot the pending registers using the native access width */
	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	/* Only handle interrupts targeting this CPU */
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
2011-03-24 00:08:58 +03:00
static void gic_mask_irq ( struct irq_data * d )
2008-04-28 20:14:26 +04:00
{
2017-08-19 00:02:21 +03:00
unsigned int intr = GIC_HWIRQ_TO_SHARED ( d - > hwirq ) ;
write_gic_rmask ( BIT ( intr ) ) ;
gic_clear_pcpu_masks ( intr ) ;
2008-04-28 20:14:26 +04:00
}
2011-03-24 00:08:58 +03:00
static void gic_unmask_irq ( struct irq_data * d )
2008-04-28 20:14:26 +04:00
{
2017-08-19 00:02:21 +03:00
struct cpumask * affinity = irq_data_get_affinity_mask ( d ) ;
unsigned int intr = GIC_HWIRQ_TO_SHARED ( d - > hwirq ) ;
unsigned int cpu ;
write_gic_smask ( BIT ( intr ) ) ;
gic_clear_pcpu_masks ( intr ) ;
cpu = cpumask_first_and ( affinity , cpu_online_mask ) ;
set_bit ( intr , per_cpu_ptr ( pcpu_masks , cpu ) ) ;
2008-04-28 20:14:26 +04:00
}
2014-09-19 01:47:20 +04:00
static void gic_ack_irq ( struct irq_data * d )
{
2014-09-19 01:47:27 +04:00
unsigned int irq = GIC_HWIRQ_TO_SHARED ( d - > hwirq ) ;
2014-09-19 01:47:23 +04:00
2017-08-13 07:36:24 +03:00
write_gic_wedge ( irq ) ;
2014-09-19 01:47:20 +04:00
}
2014-09-19 01:47:21 +04:00
static int gic_set_type ( struct irq_data * d , unsigned int type )
{
2014-09-19 01:47:27 +04:00
unsigned int irq = GIC_HWIRQ_TO_SHARED ( d - > hwirq ) ;
2014-09-19 01:47:21 +04:00
unsigned long flags ;
bool is_edge ;
spin_lock_irqsave ( & gic_lock , flags ) ;
switch ( type & IRQ_TYPE_SENSE_MASK ) {
case IRQ_TYPE_EDGE_FALLING :
2017-08-13 07:36:19 +03:00
change_gic_pol ( irq , GIC_POL_FALLING_EDGE ) ;
2017-08-13 07:36:20 +03:00
change_gic_trig ( irq , GIC_TRIG_EDGE ) ;
2017-08-13 07:36:21 +03:00
change_gic_dual ( irq , GIC_DUAL_SINGLE ) ;
2014-09-19 01:47:21 +04:00
is_edge = true ;
break ;
case IRQ_TYPE_EDGE_RISING :
2017-08-13 07:36:19 +03:00
change_gic_pol ( irq , GIC_POL_RISING_EDGE ) ;
2017-08-13 07:36:20 +03:00
change_gic_trig ( irq , GIC_TRIG_EDGE ) ;
2017-08-13 07:36:21 +03:00
change_gic_dual ( irq , GIC_DUAL_SINGLE ) ;
2014-09-19 01:47:21 +04:00
is_edge = true ;
break ;
case IRQ_TYPE_EDGE_BOTH :
/* polarity is irrelevant in this case */
2017-08-13 07:36:20 +03:00
change_gic_trig ( irq , GIC_TRIG_EDGE ) ;
2017-08-13 07:36:21 +03:00
change_gic_dual ( irq , GIC_DUAL_DUAL ) ;
2014-09-19 01:47:21 +04:00
is_edge = true ;
break ;
case IRQ_TYPE_LEVEL_LOW :
2017-08-13 07:36:19 +03:00
change_gic_pol ( irq , GIC_POL_ACTIVE_LOW ) ;
2017-08-13 07:36:20 +03:00
change_gic_trig ( irq , GIC_TRIG_LEVEL ) ;
2017-08-13 07:36:21 +03:00
change_gic_dual ( irq , GIC_DUAL_SINGLE ) ;
2014-09-19 01:47:21 +04:00
is_edge = false ;
break ;
case IRQ_TYPE_LEVEL_HIGH :
default :
2017-08-13 07:36:19 +03:00
change_gic_pol ( irq , GIC_POL_ACTIVE_HIGH ) ;
2017-08-13 07:36:20 +03:00
change_gic_trig ( irq , GIC_TRIG_LEVEL ) ;
2017-08-13 07:36:21 +03:00
change_gic_dual ( irq , GIC_DUAL_SINGLE ) ;
2014-09-19 01:47:21 +04:00
is_edge = false ;
break ;
}
2015-06-23 15:41:25 +03:00
if ( is_edge )
irq_set_chip_handler_name_locked ( d , & gic_edge_irq_controller ,
handle_edge_irq , NULL ) ;
else
irq_set_chip_handler_name_locked ( d , & gic_level_irq_controller ,
handle_level_irq , NULL ) ;
2014-09-19 01:47:21 +04:00
spin_unlock_irqrestore ( & gic_lock , flags ) ;
2008-04-28 20:14:26 +04:00
2014-09-19 01:47:21 +04:00
return 0 ;
}
#ifdef CONFIG_SMP
/*
 * Re-route a shared interrupt to the first online CPU in @cpumask and
 * update the per-CPU dispatch masks accordingly.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks; only mark it if it is not masked */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif
2014-09-19 01:47:26 +04:00
/* irq_chip for level-triggered shared interrupts (no ack step needed) */
static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};
static struct irq_chip gic_edge_irq_controller = {
2011-03-24 00:08:58 +03:00
. name = " MIPS GIC " ,
2014-09-19 01:47:20 +04:00
. irq_ack = gic_ack_irq ,
2011-03-24 00:08:58 +03:00
. irq_mask = gic_mask_irq ,
. irq_unmask = gic_unmask_irq ,
2014-09-19 01:47:21 +04:00
. irq_set_type = gic_set_type ,
2008-04-28 20:14:26 +04:00
# ifdef CONFIG_SMP
2011-03-24 00:08:58 +03:00
. irq_set_affinity = gic_set_affinity ,
2008-04-28 20:14:26 +04:00
# endif
2015-12-08 16:20:28 +03:00
. ipi_send_single = gic_send_ipi ,
2008-04-28 20:14:26 +04:00
} ;
2015-06-12 11:01:56 +03:00
/*
 * Dispatch all pending, unmasked VP-local interrupts. @chained selects
 * generic_handle_irq() (chained handler context) vs do_IRQ().
 */
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}
static void gic_mask_local_irq ( struct irq_data * d )
{
int intr = GIC_HWIRQ_TO_LOCAL ( d - > hwirq ) ;
2017-08-13 07:36:25 +03:00
write_gic_vl_rmask ( BIT ( intr ) ) ;
2014-09-19 01:47:27 +04:00
}
static void gic_unmask_local_irq ( struct irq_data * d )
{
int intr = GIC_HWIRQ_TO_LOCAL ( d - > hwirq ) ;
2017-08-13 07:36:25 +03:00
write_gic_vl_smask ( BIT ( intr ) ) ;
2014-09-19 01:47:27 +04:00
}
/* irq_chip for local interrupts that are masked per-VP */
static struct irq_chip gic_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq,
	.irq_unmask	= gic_unmask_local_irq,
};
static void gic_mask_local_irq_all_vpes ( struct irq_data * d )
{
int intr = GIC_HWIRQ_TO_LOCAL ( d - > hwirq ) ;
int i ;
unsigned long flags ;
spin_lock_irqsave ( & gic_lock , flags ) ;
for ( i = 0 ; i < gic_vpes ; i + + ) {
2017-08-13 07:36:26 +03:00
write_gic_vl_other ( mips_cm_vp_id ( i ) ) ;
2017-08-13 07:36:25 +03:00
write_gic_vo_rmask ( BIT ( intr ) ) ;
2014-09-19 01:47:27 +04:00
}
spin_unlock_irqrestore ( & gic_lock , flags ) ;
}
static void gic_unmask_local_irq_all_vpes ( struct irq_data * d )
{
int intr = GIC_HWIRQ_TO_LOCAL ( d - > hwirq ) ;
int i ;
unsigned long flags ;
spin_lock_irqsave ( & gic_lock , flags ) ;
for ( i = 0 ; i < gic_vpes ; i + + ) {
2017-08-13 07:36:26 +03:00
write_gic_vl_other ( mips_cm_vp_id ( i ) ) ;
2017-08-13 07:36:25 +03:00
write_gic_vo_smask ( BIT ( intr ) ) ;
2014-09-19 01:47:27 +04:00
}
spin_unlock_irqrestore ( & gic_lock , flags ) ;
}
/* irq_chip for local interrupts that must be (un)masked on all VPs at once */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq_all_vpes,
	.irq_unmask	= gic_unmask_local_irq_all_vpes,
};
2014-09-19 01:47:24 +04:00
static void __gic_irq_dispatch ( void )
2008-04-28 20:14:26 +04:00
{
2015-06-12 11:01:56 +03:00
gic_handle_local_int ( false ) ;
gic_handle_shared_int ( false ) ;
2014-09-19 01:47:24 +04:00
}
2008-04-28 20:14:26 +04:00
2015-09-14 11:42:37 +03:00
static void gic_irq_dispatch ( struct irq_desc * desc )
2014-09-19 01:47:24 +04:00
{
2015-06-12 11:01:56 +03:00
gic_handle_local_int ( true ) ;
gic_handle_shared_int ( true ) ;
2014-09-19 01:47:24 +04:00
}
2014-09-19 01:47:27 +04:00
/*
 * Program the per-VP map register for local interrupt @hw on every VP,
 * routing it to the configured CPU pin. Returns -EPERM when the
 * interrupt cannot be re-routed and -EINVAL for out-of-range numbers.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	unsigned long flags;
	u32 val;
	int i;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
static int gic_shared_irq_domain_map ( struct irq_domain * d , unsigned int virq ,
2017-08-19 00:02:21 +03:00
irq_hw_number_t hw , unsigned int cpu )
2014-09-19 01:47:27 +04:00
{
int intr = GIC_HWIRQ_TO_SHARED ( hw ) ;
2014-09-19 01:47:23 +04:00
unsigned long flags ;
spin_lock_irqsave ( & gic_lock , flags ) ;
2017-08-13 07:36:22 +03:00
write_gic_map_pin ( intr , GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin ) ;
2017-08-19 00:02:21 +03:00
write_gic_map_vp ( intr , BIT ( mips_cm_vp_id ( cpu ) ) ) ;
gic_clear_pcpu_masks ( intr ) ;
set_bit ( intr , per_cpu_ptr ( pcpu_masks , cpu ) ) ;
2014-09-19 01:47:23 +04:00
spin_unlock_irqrestore ( & gic_lock , flags ) ;
return 0 ;
}
2017-04-20 12:07:35 +03:00
static int gic_irq_domain_xlate ( struct irq_domain * d , struct device_node * ctrlr ,
2015-12-08 16:20:24 +03:00
const u32 * intspec , unsigned int intsize ,
irq_hw_number_t * out_hwirq ,
unsigned int * out_type )
{
if ( intsize ! = 3 )
return - EINVAL ;
if ( intspec [ 0 ] = = GIC_SHARED )
* out_hwirq = GIC_SHARED_TO_HWIRQ ( intspec [ 1 ] ) ;
else if ( intspec [ 0 ] = = GIC_LOCAL )
* out_hwirq = GIC_LOCAL_TO_HWIRQ ( intspec [ 1 ] ) ;
else
return - EINVAL ;
* out_type = intspec [ 2 ] & IRQ_TYPE_SENSE_MASK ;
return 0 ;
}
2017-04-20 12:07:36 +03:00
/*
 * Bind @virq to @hwirq in the main GIC domain. Shared interrupts get the
 * level chip (gic_set_type() may later swap it for the edge chip); local
 * interrupts get a per-cpu chip/handler appropriate to how the rest of
 * the MIPS code uses them.
 */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}
2017-04-20 12:07:36 +03:00
static int gic_irq_domain_alloc ( struct irq_domain * d , unsigned int virq ,
unsigned int nr_irqs , void * arg )
{
struct irq_fwspec * fwspec = arg ;
irq_hw_number_t hwirq ;
if ( fwspec - > param [ 0 ] = = GIC_SHARED )
hwirq = GIC_SHARED_TO_HWIRQ ( fwspec - > param [ 1 ] ) ;
else
hwirq = GIC_LOCAL_TO_HWIRQ ( fwspec - > param [ 1 ] ) ;
return gic_irq_domain_map ( d , virq , hwirq ) ;
}
2017-04-20 12:07:35 +03:00
/*
 * .free callback for the main GIC domain. Nothing to undo: routing is
 * programmed directly in hardware and overwritten on the next map.
 * Made static: it is only referenced through gic_irq_domain_ops in this
 * file, so it should not leak into the global namespace.
 */
static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}
2017-04-20 12:07:35 +03:00
static const struct irq_domain_ops gic_irq_domain_ops = {
. xlate = gic_irq_domain_xlate ,
. alloc = gic_irq_domain_alloc ,
. free = gic_irq_domain_free ,
2017-04-20 12:07:36 +03:00
. map = gic_irq_domain_map ,
2015-12-08 16:20:23 +03:00
} ;
/*
 * IPI domain translation: hwirqs are allocated dynamically and IPIs are
 * always rising-edge triggered, so there is nothing to decode.
 */
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}
static int gic_ipi_domain_alloc ( struct irq_domain * d , unsigned int virq ,
unsigned int nr_irqs , void * arg )
{
struct cpumask * ipimask = arg ;
2017-04-20 12:07:35 +03:00
irq_hw_number_t hwirq , base_hwirq ;
int cpu , ret , i ;
2015-12-08 16:20:23 +03:00
2017-04-20 12:07:35 +03:00
base_hwirq = find_first_bit ( ipi_available , gic_shared_intrs ) ;
if ( base_hwirq = = gic_shared_intrs )
return - ENOMEM ;
/* check that we have enough space */
for ( i = base_hwirq ; i < nr_irqs ; i + + ) {
if ( ! test_bit ( i , ipi_available ) )
return - EBUSY ;
}
bitmap_clear ( ipi_available , base_hwirq , nr_irqs ) ;
/* map the hwirq for each cpu consecutively */
i = 0 ;
for_each_cpu ( cpu , ipimask ) {
hwirq = GIC_SHARED_TO_HWIRQ ( base_hwirq + i ) ;
ret = irq_domain_set_hwirq_and_chip ( d , virq + i , hwirq ,
& gic_edge_irq_controller ,
NULL ) ;
if ( ret )
goto error ;
2015-12-08 16:20:23 +03:00
2017-04-20 12:07:35 +03:00
ret = irq_domain_set_hwirq_and_chip ( d - > parent , virq + i , hwirq ,
2015-12-08 16:20:23 +03:00
& gic_edge_irq_controller ,
NULL ) ;
if ( ret )
goto error ;
ret = irq_set_irq_type ( virq + i , IRQ_TYPE_EDGE_RISING ) ;
if ( ret )
goto error ;
2017-04-20 12:07:35 +03:00
ret = gic_shared_irq_domain_map ( d , virq + i , hwirq , cpu ) ;
if ( ret )
goto error ;
i + + ;
2015-12-08 16:20:23 +03:00
}
return 0 ;
error :
2017-04-20 12:07:35 +03:00
bitmap_set ( ipi_available , base_hwirq , nr_irqs ) ;
2015-12-08 16:20:23 +03:00
return ret ;
}
void gic_ipi_domain_free ( struct irq_domain * d , unsigned int virq ,
unsigned int nr_irqs )
{
2017-04-20 12:07:35 +03:00
irq_hw_number_t base_hwirq ;
struct irq_data * data ;
data = irq_get_irq_data ( virq ) ;
if ( ! data )
return ;
base_hwirq = GIC_HWIRQ_TO_SHARED ( irqd_to_hwirq ( data ) ) ;
bitmap_set ( ipi_available , base_hwirq , nr_irqs ) ;
2015-12-08 16:20:23 +03:00
}
int gic_ipi_domain_match ( struct irq_domain * d , struct device_node * node ,
enum irq_domain_bus_token bus_token )
{
bool is_ipi ;
switch ( bus_token ) {
case DOMAIN_BUS_IPI :
is_ipi = d - > bus_token = = bus_token ;
2016-07-05 16:26:00 +03:00
return ( ! node | | to_of_node ( d - > fwnode ) = = node ) & & is_ipi ;
2015-12-08 16:20:23 +03:00
break ;
default :
return 0 ;
}
}
2017-06-02 11:20:56 +03:00
static const struct irq_domain_ops gic_ipi_domain_ops = {
2015-12-08 16:20:23 +03:00
. xlate = gic_ipi_domain_xlate ,
. alloc = gic_ipi_domain_alloc ,
. free = gic_ipi_domain_free ,
. match = gic_ipi_domain_match ,
2014-09-19 01:47:23 +04:00
} ;
2017-08-13 07:36:40 +03:00
/*
 * Probe and initialise the GIC from its device tree node: pick a CPU
 * vector, locate and map the register block, read the configuration,
 * set up EIC or chained dispatch, register the IRQ and IPI domains,
 * reserve IPI vectors and program safe register defaults.
 *
 * Fix vs original: the ioremap_nocache() result is now checked before
 * the first register access instead of being dereferenced blindly.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);
	if (!mips_gic_base) {
		pr_err("Failed to ioremap GIC registers\n");
		return -ENOMEM;
	}

	/* Decode interrupt/VP counts from the GIC config register */
	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
	gic_vpes >>= __fls(GIC_CONFIG_PVPS);
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			write_gic_vl_other(mips_cm_vp_id(cpu));
			write_gic_vo_ctl(GIC_VX_CTL_EIC);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() &
					GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS +
						  gic_shared_intrs,
						  node, &gic_ipi_domain_ops,
						  NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults: shared interrupts active-high, level, masked */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(BIT(i));
	}

	/* Mask all routable local interrupts on every VP */
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			write_gic_vo_rmask(BIT(j));
		}
	}

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);