// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2001 Ralf Baechle
 * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 *
 * This file defines the IRQ handler for MIPS CPU interrupts.
 */

/*
 * Almost all MIPS CPUs define 8 interrupt sources.  They are typically
 * level triggered (i.e., cannot be cleared from the CPU; they must be
 * cleared from the device).
 *
 * The first two are software interrupts (i.e. not exposed as pins) which
 * may be used for IPIs in multi-threaded single-core systems.
 *
 * The last one is usually the CPU timer interrupt if the counter register
 * is present, or for old CPUs with an external FPU by convention it's the
 * FPU exception interrupt.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>

static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;
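
/*
 * Each CPU interrupt line is enabled or disabled through its Interrupt
 * Mask bit in the CP0 Status register: IE_SW0 is the mask for hw
 * interrupt 0, so shifting it by the hwirq number selects the right IM
 * bit.  The hazard barriers ensure the Status update has taken effect
 * before execution continues.
 */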
static inline void unmask_mips_irq(struct irq_data *d)
{
	set_c0_status(IE_SW0 << d->hwirq);
	irq_enable_hazard();
}

static inline void mask_mips_irq(struct irq_data *d)
{
	clear_c0_status(IE_SW0 << d->hwirq);
	irq_disable_hazard();
}
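
/*
 * irq_chip for the ordinary (non-MT) case: mask, ack and disable all map
 * to mask_mips_irq(), and unmask, eoi and enable all map to
 * unmask_mips_irq(), since the lines are level triggered and the device
 * itself is expected to clear the interrupt source.
 */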
static struct irq_chip mips_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_ack	= mask_mips_irq,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mask_mips_irq,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
};

/*
 * Basically the same as above, but the MT (multi-threading) variants
 * additionally disable multi-VPE execution (dvpe/evpe) while the shared
 * Cause register is updated.
 */

static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	unmask_mips_irq(d);
	return 0;
}

/*
 * While we ack the interrupt, interrupts are disabled and thus we don't
 * need to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	mask_mips_irq(d);
}

#ifdef CONFIG_GENERIC_IRQ_IPI

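/*
 * Send an IPI by setting the corresponding software interrupt bit in the
 * Cause register of the target VPE.  This only works for VPEs within the
 * local core, hence the sibling check.
 */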
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	int vpflags;

	local_irq_save(flags);

	/* We can only send IPIs to VPEs within the local core */
	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

	vpflags = dvpe();
	settc(cpu_vpe_id(&cpu_data[cpu]));
	write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
	evpe(vpflags);

	local_irq_restore(flags);
}

#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct irq_chip mips_mt_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_startup	= mips_mt_cpu_irq_startup,
	.irq_ack	= mips_mt_cpu_irq_ack,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mips_mt_cpu_irq_ack,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
	.ipi_send_single = mips_mt_send_ipi,
#endif
};
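
/*
 * Default dispatch for the CPU interrupt controller: read the pending
 * lines (Cause IP bits masked by Status IM), then service each one from
 * the highest-numbered down.  When the generic IPI infrastructure is in
 * use, the two software interrupt lines (hwirq 0 and 1) are routed to
 * the IPI domain rather than the ordinary CPU IRQ domain.
 */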
asmlinkage void __weak plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	pending >>= CAUSEB_IP;
	while (pending) {
		struct irq_domain *d;

		irq = fls(pending) - 1;
		if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
			d = ipi_domain;
		else
			d = irq_domain;

		do_domain_IRQ(d, irq);
		pending &= ~BIT(irq);
	}
}
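
/*
 * Map a CPU hardware interrupt into the linear domain.  The two software
 * interrupt lines get the MT-aware chip when the CPU implements the MT
 * ASE (they are used for IPIs there); everything else uses the plain CPU
 * chip.  With vectored interrupts available, each line is also given its
 * own vector pointing at plat_irq_dispatch().
 */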
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	struct irq_chip *chip;

	if (hw < 2 && cpu_has_mipsmt) {
		/* Software interrupts are used for MT/CMT IPI */
		chip = &mips_mt_cpu_irq_controller;
	} else {
		chip = &mips_cpu_irq_controller;
	}

	if (cpu_has_vint)
		set_vi_handler(hw, plat_irq_dispatch);

	irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
	.map	= mips_cpu_intc_map,
	.xlate	= irq_domain_xlate_onecell,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

struct cpu_ipi_domain_state {
	DECLARE_BITMAP(allocated, 2);
};
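
/*
 * Allocate IPI virqs: each one claims a free software interrupt line
 * (there are only two), then wires up the MT CPU chip at both the IPI
 * level and the parent CPU domain level of the hierarchy.
 */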
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct cpu_ipi_domain_state *state = domain->host_data;
	unsigned int i, hwirq;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		hwirq = find_first_zero_bit(state->allocated, 2);
		if (hwirq == 2)
			return -EBUSY;
		bitmap_set(state->allocated, hwirq, 1);

		ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);
		if (ret)
			return ret;

		ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i,
						    hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);
		if (ret)
			return ret;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
		if (ret)
			return ret;
	}

	return 0;
}
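
/*
 * Only match this domain when the caller is explicitly looking for an
 * IPI domain (DOMAIN_BUS_IPI) and, if a device node is given, it is the
 * node this domain was created for.
 */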
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
	.alloc	= mips_cpu_ipi_alloc,
	.match	= mips_cpu_ipi_match,
};
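
/*
 * Create the IPI domain as a hierarchy on top of the CPU interrupt
 * domain, covering the two software interrupt lines, and tag it with the
 * IPI bus token so mips_cpu_ipi_match() can find it.
 */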
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
	struct cpu_ipi_domain_state *ipi_domain_state;

	ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
	ipi_domain = irq_domain_add_hierarchy(irq_domain,
					      IRQ_DOMAIN_FLAG_IPI_SINGLE,
					      2, of_node,
					      &mips_cpu_ipi_chip_ops,
					      ipi_domain_state);
	if (!ipi_domain)
		panic("Failed to add MIPS CPU IPI domain");
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}

#endif /* !CONFIG_GENERIC_IRQ_IPI */
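
/*
 * Common init path: mask and acknowledge all CPU interrupt lines, then
 * register a legacy domain for the eight CPU interrupts starting at
 * MIPS_CPU_IRQ_BASE.  On MT-capable CPUs, and when the generic IPI
 * support is enabled, the software-interrupt IPI domain is layered on
 * top of it.
 */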
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
					   &mips_cpu_intc_irq_domain_ops,
					   NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for MIPS CPU");

	/*
	 * Only proceed to register the software interrupt IPI implementation
	 * for CPUs which implement the MIPS MT (multi-threading) ASE.
	 */
	if (cpu_has_mipsmt)
		mips_cpu_register_ipi_domain(of_node);
}

void __init mips_cpu_irq_init(void)
{
	__mips_cpu_irq_init(NULL);
}

int __init mips_cpu_irq_of_init(struct device_node *of_node,
				struct device_node *parent)
{
	__mips_cpu_irq_init(of_node);
	return 0;
}
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);