/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
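/*
 * Convert the watchdog threshold (in seconds) into a cycle count:
 * cpu_khz is the CPU frequency in kHz, so cpu_khz * 1000 is cycles
 * per second. The result is used by the hardlockup detector as the
 * perf event sample period, i.e. how many cycles elapse between the
 * NMIs that check for a stuck CPU.
 */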
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;
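
/*
 * Dump a backtrace from every online CPU: mark each online CPU in
 * backtrace_mask, send an NMI IPI to all of them, then poll for up
 * to 10 seconds while each CPU's NMI handler clears its own bit.
 */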
void arch_trigger_all_cpu_backtrace(void)
{
	int i;

	if (test_and_set_bit(0, &backtrace_flag))
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		return;

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

	printk(KERN_INFO "sending NMI to all CPUs:\n");
	apic->send_IPI_all(NMI_VECTOR);

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_clear_bit();
}
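
/*
 * Runs in NMI context on every CPU that received the IPI above.
 * __kprobes keeps the function out of reach of kprobes, which must
 * not fire in NMI context. The local arch spinlock serializes the
 * printk output so backtraces from different CPUs do not interleave.
 */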
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

		arch_spin_lock(&lock);
		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return NMI_HANDLED;
	}

	return NMI_DONE;
}
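
/*
 * Hook the handler into the NMI_LOCAL chain at early boot; "arch_bt"
 * is the name under which the handler is registered.
 */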
static int __init register_trigger_all_cpu_backtrace(void)
{
	register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
				0, "arch_bt");
	return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif