// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "plic: " fmt
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which source 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE			0x2000
#define ENABLE_PER_HART			0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_PER_HART		0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04
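
/*
 * Putting the layout together (a worked example derived from the offsets
 * above, not from any datasheet): for hart context C and interrupt source
 * ID, the driver computes
 *
 *	priority:   regs + PRIORITY_BASE + ID * PRIORITY_PER_ID
 *	enable bit: regs + ENABLE_BASE + C * ENABLE_PER_HART + 4 * (ID / 32)
 *	            (bit ID % 32 within that 32-bit word)
 *	threshold:  regs + CONTEXT_BASE + C * CONTEXT_PER_HART + CONTEXT_THRESHOLD
 *	claim:      regs + CONTEXT_BASE + C * CONTEXT_PER_HART + CONTEXT_CLAIM
 *
 * which is exactly what plic_toggle(), plic_set_threshold() and
 * plic_handle_irq() below do.
 */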

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

struct plic_priv {
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
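
/*
 * Set or clear the enable bit for @hwirq in one context's enable array.
 * The read-modify-write is done under @handler->enable_lock because the
 * enable words are shared MMIO and we can't rely on atomic RMW there.
 */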
static inline void plic_toggle(struct plic_handler *handler,
				int hwirq, int enable)
{
	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	raw_spin_lock(&handler->enable_lock);
	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
	raw_spin_unlock(&handler->enable_lock);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	/* set the source priority: 1 to deliver, 0 to gate it off */
	writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (handler->present &&
		    cpumask_test_cpu(cpu, &handler->priv->lmask))
			plic_toggle(handler, d->hwirq, enable);
	}
}
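
/*
 * Unmask routes the interrupt to a single online hart picked from the
 * irq's affinity mask; mask clears the enable bit on every hart this
 * PLIC can target.  The priority is written as 1/0 via plic_irq_toggle().
 */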
static void plic_irq_unmask(struct irq_data *d)
{
	struct cpumask amask;
	unsigned int cpu;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, cpu_online_mask);
	cpu = cpumask_any_and(irq_data_get_affinity_mask(d), &amask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	plic_irq_toggle(cpumask_of(cpu), d, 1);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	plic_irq_toggle(&priv->lmask, d, 0);
}

#ifdef CONFIG_SMP
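/*
 * Move an interrupt to one CPU in @mask_val: disable it everywhere this
 * PLIC can deliver, then re-enable it on the chosen hart only, mirroring
 * the unmask path's single-target routing.
 */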
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_get_chip_data(d->irq);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_toggle(&priv->lmask, d, 0);
	plic_irq_toggle(cpumask_of(cpu), d, 1);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif
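
/*
 * EOI: complete the interrupt by writing its source ID back to the claim
 * register, which lets the PLIC deliver that source again (see the
 * claim/complete comment above plic_handle_irq() below).
 */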
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
}

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	return 0;
}
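
/*
 * Hierarchical allocation hook: translate the single-cell DT specifier
 * into a hwirq, then map each of the @nr_irqs consecutive descriptors
 * onto plic_chip via plic_irqdomain_map().
 */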
static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct pt_regs *regs)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	csr_clear(CSR_IE, IE_EIE);
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

		if (unlikely(irq <= 0))
			pr_warn_ratelimited("can't find mapping for hwirq %lu\n",
					hwirq);
		else
			generic_handle_irq(irq);
	}
	csr_set(CSR_IE, IE_EIE);
}

/*
 * Walk up the DT tree until we find an active RISC-V core (HART) node and
 * extract the cpuid from it.
 */
static int plic_find_hart_id(struct device_node *node)
{
	for (; node; node = node->parent) {
		if (of_device_is_compatible(node, "riscv"))
			return riscv_of_processor_hartid(node);
	}

	return -1;
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
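
/*
 * CPU hotplug callbacks: gate interrupt delivery on this hart by toggling
 * the external-interrupt enable bit in the local CSR, and by raising the
 * context's priority threshold above the hardwired source priority of 1
 * (PLIC_DISABLE_THRESHOLD) or dropping it back to 0.
 */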
static int plic_dying_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_clear(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	csr_set(CSR_IE, IE_EIE);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}
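
/*
 * Probe from the device tree: "riscv,ndev" gives the number of interrupt
 * sources, and each interrupts-extended entry describes one hart context.
 * Only contexts whose specifier matches RV_IRQ_EXT (the external interrupt
 * for the kernel's privilege level: 9 in S-mode, 11 in M-mode) are kept.
 * A sketch of a typical node, with purely illustrative addresses and
 * counts rather than values from any particular board:
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 */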
static int __init plic_init(struct device_node *node,
		struct device_node *parent)
{
	int error = 0, nr_contexts, nr_handlers = 0, i;
	u32 nr_irqs;
	struct plic_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = of_iomap(node, 0);
	if (WARN_ON(!priv->regs)) {
		error = -EIO;
		goto out_free_priv;
	}

	error = -EINVAL;
	of_property_read_u32(node, "riscv,ndev", &nr_irqs);
	if (WARN_ON(!nr_irqs))
		goto out_iounmap;

	nr_contexts = of_irq_count(node);
	if (WARN_ON(!nr_contexts))
		goto out_iounmap;
	if (WARN_ON(nr_contexts < num_possible_cpus()))
		goto out_iounmap;

	error = -ENOMEM;
	priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
			&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto out_iounmap;

	for (i = 0; i < nr_contexts; i++) {
		struct of_phandle_args parent;
		struct plic_handler *handler;
		irq_hw_number_t hwirq;
		int cpu, hartid;

		if (of_irq_parse_one(node, i, &parent)) {
			pr_err("failed to parse parent for context %d.\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

		hartid = plic_find_hart_id(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base =
			priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base =
			priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
		handler->priv = priv;
done:
		/* start with every interrupt source disabled for this context */
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
			plic_toggle(handler, hwirq, 0);
		nr_handlers++;
	}

	cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
			  "irqchip/sifive/plic:starting",
			  plic_starting_cpu, plic_dying_cpu);
	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
		nr_irqs, nr_handlers, nr_contexts);
	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
	iounmap(priv->regs);
out_free_priv:
	kfree(priv);
	return error;
}

IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */