/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/*
* A " disabled " interrupt should never fire , to catch problems
* we set its logical number to this
*/
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
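
/*
 * Illustrative sketch, not part of the original header: the sentinel is
 * meant to be caught when the logical number pulled from an event queue is
 * dispatched. The helper name below is hypothetical.
 *
 *	static void xive_dispatch_logical(u32 irq)
 *	{
 *		if (irq == XIVE_BAD_IRQ) {
 *			pr_warn("XIVE: disabled interrupt fired\n");
 *			return;
 *		}
 *		generic_handle_irq(irq);
 *	}
 */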
/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one queue is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};
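
/*
 * Illustrative sketch, not part of the original header: pending_prio is a
 * bitmask indexed by priority, so marking and testing priority "prio" would
 * look roughly like this (xc is a struct xive_cpu pointer, scan_queue() is
 * a hypothetical helper):
 *
 *	xc->pending_prio |= 1 << prio;		// note priority has work
 *	if (xc->pending_prio & (1 << prio))	// later, check before scanning
 *		scan_queue(&xc->queue[prio]);
 *	xc->pending_prio &= ~(1 << prio);	// clear once the queue is empty
 */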
/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);
	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	int	(*debug_show)(struct seq_file *m, void *private);
	const char *name;
};
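
/*
 * Illustrative sketch, not part of the original header: a backend would
 * typically fill in a static ops table and hand it to xive_core_init()
 * together with the MMIO area and offset that function expects. All "foo"
 * names below are hypothetical placeholders, not real backends.
 *
 *	static const struct xive_ops xive_foo_ops = {
 *		.populate_irq_data	= xive_foo_populate_irq_data,
 *		.configure_irq		= xive_foo_configure_irq,
 *		.setup_queue		= xive_foo_setup_queue,
 *		.cleanup_queue		= xive_foo_cleanup_queue,
 *		.match			= xive_foo_match,
 *		.shutdown		= xive_foo_shutdown,
 *		.update_pending		= xive_foo_update_pending,
 *		.eoi			= xive_foo_eoi,
 *		.sync_source		= xive_foo_sync_source,
 *		.name			= "foo",
 *	};
 *
 *	xive_core_init(&xive_foo_ops, mmio_area, mmio_offset, max_prio);
 */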
bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
int xive_core_debug_init(void);

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
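
/*
 * Worked example: with 4K pages (PAGE_SHIFT == 12), a 64K event queue
 * (queue_shift == 16) gives an allocation order of 16 - 12 = 4, i.e. a
 * 16-page contiguous buffer, while any queue no larger than one page
 * gives order 0 (a single page).
 */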

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */