/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

/*
 * The run state of the lockup detectors is controlled by the content of the
 * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit:
 * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector.
 *
 * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled'
 * are variables that are only used as an 'interface' between the parameters
 * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The
 * 'watchdog_thresh' variable is handled differently because its value is not
 * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh'
 * equals zero.
 */
#define NMI_WATCHDOG_ENABLED_BIT   0
#define SOFT_WATCHDOG_ENABLED_BIT  1
#define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
#define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
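
/*
 * Example (illustrative, not from the original source): with both detectors
 * active, 'watchdog_enabled' is SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED
 * == 0x3. Writing 0 to /proc/sys/kernel/nmi_watchdog then clears only bit 0,
 * leaving the soft lockup detector running.
 */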
int __read_mostly watchdog_thresh = 10;

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#else
#define sysctl_softlockup_all_cpu_backtrace 0
#endif

static int __read_mostly watchdog_running;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static unsigned long soft_lockup_nmi_warn;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static bool hardlockup_detector_enabled = true;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void watchdog_enable_hardlockup_detector(bool val)
{
	hardlockup_detector_enabled = val;
}

bool watchdog_hardlockup_detector_is_enabled(void)
{
	return hardlockup_detector_enabled;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1) || !strncmp(str, "2", 1)) {
		/*
		 * Setting 'nmi_watchdog=1' or 'nmi_watchdog=2' (legacy option)
		 * has the same effect.
		 */
		watchdog_user_enabled = 1;
		watchdog_enable_hardlockup_detector(true);
	}

	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

#ifdef CONFIG_SMP
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. Thus we
 * couple the thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
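
/*
 * Example (illustrative, not from the original source): with the default
 * watchdog_thresh of 10 seconds, the hard lockup window is ~10s while
 * get_softlockup_thresh() returns 20, i.e. soft lockups are only reported
 * after ~20 seconds without progress.
 */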

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
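
/*
 * Worked example (illustrative, not from the original source):
 * running_clock() == 5000000000 (5s in ns) yields 5000000000 >> 30 == 4.
 * These "seconds" run ~7% slow (2^30 ns is 1.074s), which is irrelevant
 * at multi-second lockup thresholds.
 */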

static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
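
/*
 * Example (illustrative, not from the original source): with
 * watchdog_thresh == 10 the soft threshold is 20s, so
 * sample_period = 20 * (NSEC_PER_SEC / 5) = 4e9 ns. The per-CPU hrtimer
 * then fires every 4 seconds, giving it a couple of chances to increment
 * hrtimer_interrupts within the ~10s hard lockup window.
 */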

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
	/*
	 * Preemption can be enabled.  It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif
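
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * code that legitimately keeps a CPU busy, such as a long dump loop with
 * interrupts disabled, is expected to pet the watchdog as it goes:
 *
 *	while (more_to_dump()) {	<-- hypothetical helper
 *		dump_one_record();	<-- hypothetical helper
 *		touch_nmi_watchdog();	<-- resets hard and soft detectors
 *	}
 */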

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return 1;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return 0;
}
#endif

static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + get_softlockup_thresh()))
		return now - touch_ts;

	return 0;
}
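
/*
 * Example (illustrative, not from the original source): with
 * watchdog_thresh == 10 (soft threshold 20), touch_ts == 100 and
 * now == 125, time_after(125, 120) is true and the function returns 25,
 * the number of seconds the CPU appears stuck.
 */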

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d",
			      this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
			     this_cpu);

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm.  The second process prevents the quiet cycle
			 * and never gets reported.  Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}

		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/*
	 * Some kernels need to default hard lockup detection to
	 * 'disabled', for example a guest on a hypervisor.
	 */
	if (!watchdog_hardlockup_detector_is_enabled()) {
		event = ERR_PTR(-ENOENT);
		goto handle_err;
	}

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
						 watchdog_overflow_callback, NULL);

handle_err:
	/* save cpu0 error for future comparison */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
		       cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;

out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
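
/*
 * Note (illustrative, not from the original source):
 * smpboot_register_percpu_thread() spawns one "watchdog/N" thread per
 * online CPU from this descriptor. CPU hotplug parks and unparks those
 * threads, which maps to watchdog_disable() and watchdog_enable() above.
 */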

static void restart_watchdog_hrtimer(void *info)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
	int ret;

	/*
	 * No need to cancel and restart hrtimer if it is currently executing
	 * because it will reprogram itself with the new period now.
	 * We should never see it unqueued here because we are running per-cpu
	 * with interrupts disabled.
	 */
	ret = hrtimer_try_to_cancel(hrtimer);
	if (ret == 1)
		hrtimer_start(hrtimer, ns_to_ktime(sample_period),
			      HRTIMER_MODE_REL_PINNED);
}

static void update_timers(int cpu)
{
	/*
	 * Make sure that the perf event counter will adapt to the new
	 * sampling period. Updating the sampling period directly would
	 * be much nicer but we do not have an API for that now, so
	 * let's use a big hammer.
	 * The hrtimer will adopt the new period on the next tick but this
	 * might be late already, so we have to restart the timer as well.
	 */
	watchdog_nmi_disable(cpu);
	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
	watchdog_nmi_enable(cpu);
}

static void update_timers_all_cpus(void)
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		update_timers(cpu);
	put_online_cpus();
}

static int watchdog_enable_all_cpus(bool sample_period_changed)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread(&watchdog_threads);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else if (sample_period_changed) {
		update_timers_all_cpus();
	}

	return err;
}

/* prepare/enable/disable routines */

/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus(true);
	else
		watchdog_disable_all_cpus();

	return err;
}

static DEFINE_MUTEX(watchdog_proc_mutex);

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	mutex_lock(&watchdog_proc_mutex);

	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;

		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);

		/*
		 * Update the run state of the lockup detectors.
		 * Restore 'watchdog_enabled' on failure.
		 */
		err = proc_watchdog_update();
		if (err)
			watchdog_enabled = old;
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
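
/*
 * Example (illustrative, not from the original source): because
 * proc_nmi_watchdog() passes only NMI_WATCHDOG_ENABLED, a shell command like
 *	echo 0 > /proc/sys/kernel/nmi_watchdog
 * clears just bit 0 of 'watchdog_enabled'; the soft lockup detector keeps
 * running as long as SOFT_WATCHDOG_ENABLED is still set.
 */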

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_proc_mutex);

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period.
	 * Restore 'watchdog_thresh' on failure.
	 */
	set_sample_period();
	err = proc_watchdog_update();
	if (err)
		watchdog_thresh = old;
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
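
/*
 * Example (illustrative, not from the original source):
 *	echo 20 > /proc/sys/kernel/watchdog_thresh
 * raises the hard lockup window to ~20s and the soft lockup window to ~40s;
 * set_sample_period() then yields an 8s hrtimer period and
 * proc_watchdog_update() reprograms the running detectors.
 */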

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */
int proc_dowatchdog(struct ctl_table *table, int write,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old_thresh, old_enabled;
	bool old_hardlockup;

	mutex_lock(&watchdog_proc_mutex);
	old_thresh = ACCESS_ONCE(watchdog_thresh);
	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
	old_hardlockup = watchdog_hardlockup_detector_is_enabled();

	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (err || !write)
		goto out;

	set_sample_period();
	/*
	 * Watchdog threads shouldn't be enabled if they are
	 * disabled. The 'watchdog_running' variable check in
	 * watchdog_*_all_cpus() function takes care of this.
	 */
	if (watchdog_user_enabled && watchdog_thresh) {
		/*
		 * Prevent a change in watchdog_thresh accidentally overriding
		 * the enablement of the hardlockup detector.
		 */
		if (watchdog_user_enabled != old_enabled)
			watchdog_enable_hardlockup_detector(true);
		err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
	} else
		watchdog_disable_all_cpus();

	/* Restore old values on failure */
	if (err) {
		watchdog_thresh = old_thresh;
		watchdog_user_enabled = old_enabled;
		watchdog_enable_hardlockup_detector(old_hardlockup);
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	return err;
}
#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
	set_sample_period();

	if (watchdog_user_enabled)
		watchdog_enable_all_cpus(false);
}