/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include <asm/iseries/it_lp_naca.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

static struct HvLpEvent *get_next_hvlpevent(void)
{
	struct HvLpEvent *event;

	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	if (hvlpevent_is_valid(event)) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
					IT_LP_EVENT_ALIGN;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.hq_current_event >
				hvlpevent_queue.hq_last_event) {
			hvlpevent_queue.hq_current_event =
				hvlpevent_queue.hq_event_stack;
		}
	} else {
		event = NULL;
	}

	return event;
}

static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	return hvlpevent_is_valid(next_event) ||
		hvlpevent_queue.hq_overflow_pending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid
	 * bits, i.e. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
						IT_LP_EVENT_ALIGN) - 1;

	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent *)((char *)event + 3 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
		/* fall through */
	case 2:
		tmp = (struct HvLpEvent *)((char *)event + 2 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
		/* fall through */
	case 1:
		tmp = (struct HvLpEvent *)((char *)event + 1 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
	}

	mb();

	hvlpevent_invalidate(event);
}

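/*
 * Worked example (illustrative; assumes the 64-byte IT_LP_EVENT_ALIGN
 * from it_lp_queue.h): an event with xSizeMinus1 == 191 spans three
 * 64-byte blocks, so extra = ((191 + 64) / 64) - 1 = 2 and the
 * potential valid bits at offsets 128 and 64 beyond the event header
 * are cleared before the event itself is invalidated.
 */
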
void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent *event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.hq_lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent. The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 * Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType);

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.hq_overflow_pending)
			/*
			 * No more valid events. If overflow events are
			 * pending, process them.
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.hq_lock);
}

static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}
	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

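/*
 * Usage note (illustrative): booting with, e.g., "spread_lpevents=2"
 * on the kernel command line limits event processing to CPUs 0 and 1;
 * hvlpevent_is_pending() above returns 0 on any CPU whose id is at or
 * beyond the configured value.
 */
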
void setup_hvlpevent_queue(void)
{
	void *eventStack;

	spin_lock_init(&hvlpevent_queue.hq_lock);

	/* Allocate a page for the Event Stack. */
	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);

	hvlpevent_queue.hq_event_stack = eventStack;
	hvlpevent_queue.hq_current_event = eventStack;
	hvlpevent_queue.hq_last_event = (char *)eventStack +
		(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
	hvlpevent_queue.hq_index = 0;
}

/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

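/*
 * Example registration (an illustrative sketch, not part of this
 * file; my_vio_handler is a hypothetical driver callback):
 *
 *	static void my_vio_handler(struct HvLpEvent *event,
 *			struct pt_regs *regs)
 *	{
 *		...copy the event if a bottom half needs it, and do
 *		   any ACK processing here...
 *	}
 *
 *	if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
 *			my_vio_handler))
 *		printk(KERN_ERR "handler registration failed\n");
 */
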
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_rcu();
			return 0;
		}
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition. It is
 * needed only for VirtualIo, VirtualLan and SessionMgr; zero
 * indicates that our own partition index should be used, as it
 * always is for the other types.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_openLpEventPath(lpIndex, eventType);
		++lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType] &&
			lpEventHandlerPaths[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_closeLpEventPath(lpIndex, eventType);
		--lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

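/*
 * Example lifecycle (an illustrative sketch; my_vio_handler is the
 * hypothetical callback from the registration example above): a
 * driver registers its handler, opens a path, and tears the path
 * down again before unregistering:
 *
 *	HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, my_vio_handler);
 *	HvLpEvent_openPath(HvLpEvent_Type_VirtualIo, 0);
 *	...
 *	HvLpEvent_closePath(HvLpEvent_Type_VirtualIo, 0);
 *	HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualIo);
 *
 * HvLpEvent_unregisterHandler() fails (returns 1) while any path for
 * the type is still open, because lpEventHandlerPaths tracks open
 * paths per type.
 */
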
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}

	seq_printf(m, "LpEventQueue 0\n");
	seq_printf(m, "  events processed:\t%lu\n", sum);

	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n  events processed by processor:\n");

	for_each_online_cpu(cpu) {
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
	}

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (e)
		e->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);