// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>
/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */
#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4
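/*
 * The priority register for a source therefore lives at
 * regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID; plic_irq_mask() and
 * plic_irq_unmask() below simply write 0 or 1 there.
 */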
/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80
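/*
 * Enable bits are packed 32 per register: for a given context, the bit for
 * hwirq lives at enable_base + (hwirq / 32) * sizeof(u32), bit (hwirq % 32),
 * as computed in __plic_toggle() below.
 */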
/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04
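/*
 * The control registers for a context thus start at
 * regs + CONTEXT_BASE + context * CONTEXT_SIZE.
 */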
#define	PLIC_DISABLE_THRESHOLD		0x7
#define	PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0
struct plic_priv {
	struct device *dev;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
};
struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);
static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}
static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
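	/*
	 * The PLIC silently ignores a completion write for a source that is
	 * not currently enabled for this context, so briefly re-enable a
	 * disabled source to let the outstanding claim be completed.
	 */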
	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}
#ifdef CONFIG_SMP
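/*
 * The PLIC can deliver a source to every context, but Linux routes each
 * interrupt to exactly one hart: pick a CPU from the requested mask (limited
 * to the harts behind this PLIC instance), tear the source down on the old
 * target, update the effective affinity, and re-enable it on the new one.
 */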
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif
static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};
static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};
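/*
 * On PLICs with the edge quirk (see plic_quirks), edge-triggered sources are
 * switched to plic_edge_chip with handle_edge_irq so the claim is completed
 * up front, while level interrupts keep the fasteoi flow of plic_chip.
 */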
static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}
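/*
 * Syscore suspend/resume: since Linux only ever programs a source priority of
 * 0 or 1, priorities are saved as a bitmap in priv->prio_save, and each
 * present handler's enable words are copied into handler->enable_save.
 */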
static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++)
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}
static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}
static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}
static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}
static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};
/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
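/*
 * A claim read returns 0 once no enabled interrupt is pending for this
 * context, which is what terminates the loop in plic_handle_irq() below.
 */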
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			dev_warn_ratelimited(handler->priv->dev,
					     "can't find mapping for hwirq %lu\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
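/*
 * Since all enabled sources are programmed with priority 1,
 * PLIC_ENABLE_THRESHOLD (0) lets them through, while PLIC_DISABLE_THRESHOLD
 * (0x7) masks the whole context, as done for duplicate context handlers in
 * plic_probe().
 */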
static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}
static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};
static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
					   u32 *nr_irqs, u32 *nr_contexts)
{
	struct device *dev = &pdev->dev;
	int rc;

	/*
	 * Currently, only OF fwnode is supported so extend this
	 * function for ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		dev_err(dev, "riscv,ndev property not available\n");
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(dev->fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		dev_err(dev, "no PLIC context available\n");
		return -EINVAL;
	}

	return 0;
}
static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
				     u32 *parent_hwirq, int *parent_cpu)
{
	struct device *dev = &pdev->dev;
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	/*
	 * Currently, only OF fwnode is supported so extend this
	 * function for ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
static int plic_probe ( struct platform_device * pdev )
2018-07-26 16:27:00 +02:00
{
2024-02-22 15:09:53 +05:30
int error = 0 , nr_contexts , nr_handlers = 0 , cpu , i ;
2024-02-22 15:09:49 +05:30
struct device * dev = & pdev - > dev ;
unsigned long plic_quirks = 0 ;
2020-05-18 14:44:40 +05:30
struct plic_handler * handler ;
2024-02-22 15:09:53 +05:30
u32 nr_irqs , parent_hwirq ;
2024-02-22 15:09:49 +05:30
struct plic_priv * priv ;
2024-02-22 15:09:53 +05:30
irq_hw_number_t hwirq ;
2024-02-22 15:09:49 +05:30
if ( is_of_node ( dev - > fwnode ) ) {
const struct of_device_id * id ;
id = of_match_node ( plic_match , to_of_node ( dev - > fwnode ) ) ;
if ( id )
plic_quirks = ( unsigned long ) id - > data ;
}

	error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
	if (error)
		return error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (WARN_ON(!priv->regs))
		return -EIO;

	priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
		if (error) {
			dev_warn(dev, "hwirq for context%d not found\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			dev_warn(dev, "Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			dev_warn(dev, "handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
			i * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
			i * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
						    sizeof(*handler->enable_save), GFP_KERNEL);
		if (!handler->enable_save)
			goto fail_cleanup_contexts;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto fail_cleanup_contexts;

	/*
	 * We can have multiple PLIC instances so setup global state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

	dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
		 nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	return -ENOMEM;
}

static struct platform_driver plic_driver = {
	.driver = {
		.name		= "riscv-plic",
		.of_match_table	= plic_match,
	},
	.probe = plic_probe,
};
builtin_platform_driver(plic_driver);