// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set mask to halt GFRC if any online core in SMP cluster is halted.
 * Only works for ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
        struct bcr_generic gfrc;
        unsigned long flags;
        u32 gfrc_halt_mask;

        READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

        /*
         * CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added in
         * GFRC 0x3 version.
         */
        if (gfrc.ver < 0x3)
                return;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd(CMD_GFRC_READ_CORE, 0);
        gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
        gfrc_halt_mask |= BIT(cpu);
        __mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
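
/*
 * Add @cpu to the ARConnect debug SELECT mask so that a halt on any selected
 * core (for any of the causes programmed below) also halts the other
 * selected cores.
 */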
static void mcip_update_debug_halt_mask(int cpu)
{
        u32 mcip_mask = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * mcip_mask is same for CMD_DEBUG_SET_SELECT and CMD_DEBUG_SET_MASK
         * commands. So read it once instead of reading both CMD_DEBUG_READ_MASK
         * and CMD_DEBUG_READ_SELECT.
         */
        __mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
        mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
        mcip_mask |= BIT(cpu);
        __mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);

        /*
         * Parameter specified halt cause:
         * STATUS32[H]/actionpoint/breakpoint/self-halt
         * We choose all of them (0xF).
         */
        __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
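
/*
 * Per-CPU bring-up hook: request the IPI and self-IPI (softirq) lines for
 * @cpu and, if the hardware provides GFRC / inter-core debug, add this core
 * to their halt masks.
 */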
static void mcip_setup_per_cpu(int cpu)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        smp_ipi_irq_setup(cpu, IPI_IRQ);
        smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

        /* Update GFRC halt mask as new CPU came online */
        if (mp.gfrc)
                mcip_update_gfrc_halt_mask(cpu);

        /* Update MCIP debug mask as new CPU came online */
        if (mp.dbg)
                mcip_update_debug_halt_mask(cpu);
}
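
/*
 * Raise an IPI towards @cpu via the ARConnect inter-core interrupt unit.
 * A self-IPI cannot be sent through ARConnect, so it is emulated with the
 * core-local software triggered interrupt instead.
 */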
static void mcip_ipi_send(int cpu)
{
        unsigned long flags;
        int ipi_was_pending;

        /* ARConnect can only send IPI to others */
        if (unlikely(cpu == raw_smp_processor_id())) {
                arc_softirq_trigger(SOFTIRQ_IRQ);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /*
         * If receiver already has a pending interrupt, elide sending this one.
         * Linux cross core calling works well with concurrent IPIs
         * coalesced into one.
         * See arch/arc/kernel/smp.c: ipi_send_msg_one()
         */
        __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
        ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
        if (!ipi_was_pending)
                __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
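
/*
 * Acknowledge an incoming IPI on the receiving core: the sender(s) are
 * identified via CMD_INTRPT_CHECK_SOURCE and each pending source is ACKed
 * individually; the self-IPI (softirq) case only needs a local clear.
 */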
static void mcip_ipi_clear(int irq)
{
        unsigned int cpu, c;
        unsigned long flags;

        if (unlikely(irq == SOFTIRQ_IRQ)) {
                arc_softirq_clear(irq);
                return;
        }

        raw_spin_lock_irqsave(&mcip_lock, flags);

        /* Who sent the IPI */
        __mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
        cpu = read_aux_reg(ARC_REG_MCIP_READBACK);      /* 1,2,4,8... */

        /*
         * In rare case, multiple concurrent IPIs sent to same target can
         * possibly be coalesced by MCIP into 1 asserted IRQ, so @cpu can be
         * "vectored" (multiple bits set) as opposed to typical single bit
         */
        do {
                c = __ffs(cpu);                         /* 0,1,2,3 */
                __mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
                cpu &= ~(1U << c);
        } while (cpu);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
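
/*
 * Early SMP probe: read the MCIP build register, format the SMP extension
 * line reported in cpuinfo and record GFRC availability in the global
 * cpuinfo structure.
 */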
static void mcip_probe_n_setup(void)
{
        struct mcip_bcr mp;

        READ_BCR(ARC_REG_MCIP_BCR, mp);

        sprintf(smp_cpuinfo_buf,
                "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
                mp.ver, mp.num_cores,
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
                IS_AVAIL1(mp.gfrc, "GFRC"));

        cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}

struct plat_smp_ops plat_smp_ops = {
        .info           = smp_cpuinfo_buf,
        .init_early_smp = mcip_probe_n_setup,
        .init_per_cpu   = mcip_setup_per_cpu,
        .ipi_send       = mcip_ipi_send,
        .ipi_clear      = mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to core intc, providing:
 *  -dynamic routing (IRQ affinity)
 *  -load balancing (Round Robin interrupt distribution)
 *  -1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
        __mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}
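
/*
 * Read-modify-write the per-IRQ mode register: update the trigger (level /
 * edge) and/or the distribution mode, leaving the other field untouched
 * when the corresponding "set_" flag is false.
 */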
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
                         bool set_distr, unsigned int distr)
{
        union {
                unsigned int word;
                struct {
                        unsigned int distr:2, pad:2, lvl:1, pad2:27;
                };
        } data;

        data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
        if (set_distr)
                data.distr = distr;
        if (set_lvl)
                data.lvl = lvl;
        __mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}
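
/*
 * Mask a common IRQ given its raw hw number; used both by the irq_chip
 * callbacks below and at init time, before any irq_data exists.
 */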
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
        idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        __mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
        __mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);
}
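
/*
 * Route @data->hwirq to the online CPUs in @cpumask: a single destination
 * CPU uses direct delivery, more than one switches the IRQ to Round Robin
 * distribution.
 */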
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
                     bool force)
{
        unsigned long flags;
        cpumask_t online;
        unsigned int destination_bits;
        unsigned int distribution_mode;

        /* errout if no online cpu per @cpumask */
        if (!cpumask_and(&online, cpumask, cpu_online_mask))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);

        destination_bits = cpumask_bits(&online)[0];
        idu_set_dest(data->hwirq, destination_bits);

        if (ffs(destination_bits) == fls(destination_bits))
                distribution_mode = IDU_M_DISTRI_DEST;
        else
                distribution_mode = IDU_M_DISTRI_RR;

        idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
        unsigned long flags;

        /*
         * ARCv2 IDU HW does not support inverse polarity, so these are the
         * only interrupt types supported.
         */
        if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
                return -EINVAL;

        raw_spin_lock_irqsave(&mcip_lock, flags);
        idu_set_mode(data->hwirq, true,
                     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
                                                   IDU_M_TRIG_LEVEL,
                     false, 0);
        raw_spin_unlock_irqrestore(&mcip_lock, flags);

        return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
        /*
         * By default send all common interrupts to all available online CPUs.
         * The affinity of common interrupts in IDU must be set manually since
         * in some cases the kernel will not call irq_set_affinity() by itself:
         *   1. When the kernel is not configured with support of SMP.
         *   2. When the kernel is configured with support of SMP but the upper
         *      interrupt controllers do not support setting of the affinity
         *      and thus cannot propagate it to IDU.
         */
        idu_irq_set_affinity(data, cpu_online_mask, false);
        idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
        .name                   = "MCIP IDU Intc",
        .irq_mask               = idu_irq_mask,
        .irq_unmask             = idu_irq_unmask,
        .irq_ack                = idu_irq_ack,
        .irq_mask_ack           = idu_irq_mask_ack,
        .irq_enable             = idu_irq_enable,
        .irq_set_type           = idu_irq_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity       = idu_irq_set_affinity,
#endif
};
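
/*
 * Chained handler on the core intc line backing a common IRQ: translate the
 * core hwirq into the IDU domain and dispatch the handler mapped there.
 */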
static void idu_cascade_isr(struct irq_desc *desc)
{
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
        struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

        chained_irq_enter(core_chip, desc);
        generic_handle_domain_irq(idu_domain, idu_hwirq);
        chained_irq_exit(core_chip, desc);
}

static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

        return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
        .xlate  = irq_domain_xlate_onetwocell,
        .map    = idu_irq_map,
};

/*
 * [16, 23]: Statically assigned always private-per-core (Timers, WDT, IPI)
 * [24, 23+C]: If C > 0 then "C" common IRQs
 * [24+C, N]: Not statically assigned, private-per-core
 */
static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
        struct irq_domain *domain;
        int nr_irqs;
        int i, virq;
        struct mcip_bcr mp;
        struct mcip_idu_bcr idu_bcr;

        READ_BCR(ARC_REG_MCIP_BCR, mp);
        if (!mp.idu)
                panic("IDU not detected, but DeviceTree using it");

        READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
        nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);
        pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

        domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

        /* Parent interrupts (core-intc) are already mapped */
        for (i = 0; i < nr_irqs; i++) {
                /* Mask all common interrupts by default */
                idu_irq_mask_raw(i);

                /*
                 * Return parent uplink IRQs (towards core intc) 24, 25, ...
                 * this step has been done before already
                 * however we need it to get the parent virq and set IDU handler
                 * as first level isr
                 */
                virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
                BUG_ON(!virq);
                irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
        }

        __mcip_cmd(CMD_IDU_ENABLE, 0);

        return 0;
}
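/*
 * A minimal, illustrative device tree node for this controller (node name
 * and label below are hypothetical). Peripherals reference it via
 * interrupt-parent with one cell holding the 0-based common IRQ number
 * (common IRQ N corresponds to core intc line N + 24); a second cell with
 * the trigger type is also accepted, as the domain uses
 * irq_domain_xlate_onetwocell.
 *
 *      idu_intc: idu-interrupt-controller {
 *              compatible = "snps,archs-idu-intc";
 *              interrupt-controller;
 *              #interrupt-cells = <1>;
 *      };
 */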
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);