// SPDX-License-Identifier: GPL-2.0-only

#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>

#define CREATE_TRACE_POINTS
#include <trace/events/notifier.h>

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */
BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
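
/*
 * Illustrative sketch, not part of this file: a driver that must quiesce
 * DMA before shutdown could hook this chain.  The names quiesce_dma and
 * quiesce_nb are hypothetical; most callers go through
 * register_reboot_notifier() in kernel/reboot.c rather than touching the
 * chain head directly.
 *
 *	static int quiesce_dma(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		return NOTIFY_DONE;	// stop idling DMA engines first
 *	}
 *
 *	static struct notifier_block quiesce_nb = {
 *		.notifier_call = quiesce_dma,
 *	};
 *
 *	blocking_notifier_chain_register(&reboot_notifier_list, &quiesce_nb);
 */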

/*
 *	Notifier chain core routines.  The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
				   struct notifier_block *n,
				   bool unique_priority)
{
	while ((*nl) != NULL) {
		if (unlikely((*nl) == n)) {
			WARN(1, "notifier callback %ps already registered",
			     n->notifier_call);
			return -EEXIST;
		}
		if (n->priority > (*nl)->priority)
			break;
		if (n->priority == (*nl)->priority && unique_priority)
			return -EBUSY;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	trace_notifier_register((void *)n->notifier_call);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			trace_notifier_unregister((void *)n->notifier_call);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}

/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 * @nl: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 * @nr_to_call: Number of notifier functions to be called. Don't care
 *	value of this parameter is -1.
 * @nr_calls: Records the number of notifications sent. Don't care
 *	value of this field is NULL.
 *
 * Return: notifier_call_chain returns the value returned by the
 *	last notifier function called.
 */
static int notifier_call_chain(struct notifier_block **nl,
			       unsigned long val, void *v,
			       int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference_raw(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference_raw(nb->next);

#ifdef CONFIG_DEBUG_NOTIFIERS
		if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
			WARN(1, "Invalid notifier called!");
			nb = next_nb;
			continue;
		}
#endif
		trace_notifier_run((void *)nb->notifier_call);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if (ret & NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
NOKPROBE_SYMBOL(notifier_call_chain);

/**
 * notifier_call_chain_robust - Inform the registered notifiers about an event
 *	and rollback on error.
 * @nl: Pointer to head of the blocking notifier chain
 * @val_up: Value passed unmodified to the notifier function
 * @val_down: Value passed unmodified to the notifier function when recovering
 *	from an error on @val_up
 * @v: Pointer passed unmodified to the notifier function
 *
 * NOTE: It is important the @nl chain doesn't change between the two
 *	invocations of notifier_call_chain() such that we visit the
 *	exact same notifier callbacks; this rules out any RCU usage.
 *
 * Return: the return value of the @val_up call.
 */
static int notifier_call_chain_robust(struct notifier_block **nl,
				unsigned long val_up, unsigned long val_down,
				void *v)
{
	int ret, nr = 0;

	ret = notifier_call_chain(nl, val_up, v, -1, &nr);
	if (ret & NOTIFY_STOP_MASK)
		notifier_call_chain(nl, val_down, v, nr-1, NULL);

	return ret;
}

/*
 *	Atomic notifier chain routines.  Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 * atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n, false);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);

/**
 * atomic_notifier_chain_register_unique_prio - Add notifier to an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an atomic notifier chain if there is no other
 * notifier registered using the same priority.
 *
 * Returns 0 on success, %-EEXIST or %-EBUSY on error.
 */
int atomic_notifier_chain_register_unique_prio(struct atomic_notifier_head *nh,
					       struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_register(&nh->head, n, true);
	spin_unlock_irqrestore(&nh->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register_unique_prio);

/**
 * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an atomic notifier chain.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
		struct notifier_block *n)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nh->lock, flags);
	ret = notifier_chain_unregister(&nh->head, n);
	spin_unlock_irqrestore(&nh->lock, flags);
	synchronize_rcu();
	return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);

/**
 * atomic_notifier_call_chain - Call functions in an atomic notifier chain
 * @nh: Pointer to head of the atomic notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an atomic context, so they must not block.
 * This routine uses RCU to synchronize with changes to the chain.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
			       unsigned long val, void *v)
{
	int ret;

	rcu_read_lock();
	ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
NOKPROBE_SYMBOL(atomic_notifier_call_chain);
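
/*
 * Illustrative sketch, not part of this file, of the atomic-chain life
 * cycle; demo_chain, demo_cb and demo_nb are hypothetical names.  The
 * callback runs under rcu_read_lock() and therefore must not sleep.
 *
 *	static ATOMIC_NOTIFIER_HEAD(demo_chain);
 *
 *	static int demo_cb(struct notifier_block *nb,
 *			   unsigned long action, void *data)
 *	{
 *		return action ? NOTIFY_OK : NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block demo_nb = { .notifier_call = demo_cb };
 *
 *	atomic_notifier_chain_register(&demo_chain, &demo_nb);
 *	atomic_notifier_call_chain(&demo_chain, 1, NULL);
 *	atomic_notifier_chain_unregister(&demo_chain, &demo_nb);
 */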

/**
 * atomic_notifier_call_chain_is_empty - Check whether notifier chain is empty
 * @nh: Pointer to head of the atomic notifier chain
 *
 * Checks whether notifier chain is empty.
 *
 * Returns true if notifier chain is empty, false otherwise.
 */
bool atomic_notifier_call_chain_is_empty(struct atomic_notifier_head *nh)
{
	return !rcu_access_pointer(nh->head);
}

/*
 *	Blocking notifier chain routines.  All access to the chain is
 *	synchronized by an rwsem.
 */

static int __blocking_notifier_chain_register(struct blocking_notifier_head *nh,
					      struct notifier_block *n,
					      bool unique_priority)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n, unique_priority);

	down_write(&nh->rwsem);
	ret = notifier_chain_register(&nh->head, n, unique_priority);
	up_write(&nh->rwsem);
	return ret;
}

/**
 * blocking_notifier_chain_register - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain.
 * Must be called in process context.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	return __blocking_notifier_chain_register(nh, n, false);
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);

/**
 * blocking_notifier_chain_register_unique_prio - Add notifier to a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a blocking notifier chain if there is no other
 * notifier registered using the same priority.
 *
 * Returns 0 on success, %-EEXIST or %-EBUSY on error.
 */
int blocking_notifier_chain_register_unique_prio(struct blocking_notifier_head *nh,
						 struct notifier_block *n)
{
	return __blocking_notifier_chain_register(nh, n, true);
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register_unique_prio);
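
/*
 * Illustrative sketch, not part of this file: with the _unique_prio
 * variants a second registration at an already-taken priority fails, so
 * callers get a guaranteed total ordering of callbacks.  cb_a, cb_b and
 * demo_head are hypothetical names.
 *
 *	static struct notifier_block a = { .notifier_call = cb_a, .priority = 10 };
 *	static struct notifier_block b = { .notifier_call = cb_b, .priority = 10 };
 *
 *	blocking_notifier_chain_register_unique_prio(&demo_head, &a);	// 0
 *	blocking_notifier_chain_register_unique_prio(&demo_head, &b);	// -EBUSY
 */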

/**
 * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a blocking notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call down_write().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	down_write(&nh->rwsem);
	ret = notifier_chain_unregister(&nh->head, n);
	up_write(&nh->rwsem);
	return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);

int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh,
		unsigned long val_up, unsigned long val_down, void *v)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_access_pointer(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain_robust(&nh->head, val_up, val_down, v);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain_robust);
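
/*
 * Illustrative sketch, not part of this file: a subsystem bringing a
 * resource up can use the robust variant so that every callback which saw
 * the "up" event also sees the matching "down" event if any callback
 * refuses.  MYDEV_UP, MYDEV_DOWN, nh and dev are hypothetical names.
 *
 *	ret = blocking_notifier_call_chain_robust(&nh, MYDEV_UP, MYDEV_DOWN, dev);
 *	if (ret & NOTIFY_STOP_MASK)
 *		return notifier_to_errno(ret);	// rollback has already run
 */
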
/**
 * blocking_notifier_call_chain - Call functions in a blocking notifier chain
 * @nh: Pointer to head of the blocking notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret = NOTIFY_DONE;

	/*
	 * We check the head outside the lock, but if this access is
	 * racy then it does not matter what the result of the test
	 * is, we re-check the list after having taken the lock anyway:
	 */
	if (rcu_access_pointer(nh->head)) {
		down_read(&nh->rwsem);
		ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
		up_read(&nh->rwsem);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);

/*
 *	Raw notifier chain routines.  There is no protection;
 *	the caller must provide it.  Use at your own risk!
 */

/**
 * raw_notifier_chain_register - Add notifier to a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int raw_notifier_chain_register(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_register(&nh->head, n, false);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);

/**
 * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from a raw notifier chain.
 * All locking must be provided by the caller.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
		struct notifier_block *n)
{
	return notifier_chain_unregister(&nh->head, n);
}
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);

int raw_notifier_call_chain_robust(struct raw_notifier_head *nh,
		unsigned long val_up, unsigned long val_down, void *v)
{
	return notifier_call_chain_robust(&nh->head, val_up, val_down, v);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain_robust);

/**
 * raw_notifier_call_chain - Call functions in a raw notifier chain
 * @nh: Pointer to head of the raw notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in an undefined context.
 * All locking must be provided by the caller.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int raw_notifier_call_chain(struct raw_notifier_head *nh,
		unsigned long val, void *v)
{
	return notifier_call_chain(&nh->head, val, v, -1, NULL);
}
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
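
/*
 * Illustrative sketch, not part of this file: raw chains leave all
 * serialization to the caller, for example one mutex guarding both
 * mutation and traversal of the chain.  demo_raw_chain and demo_raw_lock
 * are hypothetical names.
 *
 *	static RAW_NOTIFIER_HEAD(demo_raw_chain);
 *	static DEFINE_MUTEX(demo_raw_lock);
 *
 *	mutex_lock(&demo_raw_lock);
 *	raw_notifier_call_chain(&demo_raw_chain, 0, NULL);
 *	mutex_unlock(&demo_raw_lock);
 */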

/*
 *	SRCU notifier chain routines.  Registration and unregistration
 *	use a mutex, and call_chain is synchronized by SRCU (no locks).
 */

/**
 * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: New entry in notifier chain
 *
 * Adds a notifier to an SRCU notifier chain.
 * Must be called in process context.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_register(&nh->head, n, false);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_register(&nh->head, n, false);
	mutex_unlock(&nh->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);

/**
 * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @n: Entry to remove from notifier chain
 *
 * Removes a notifier from an SRCU notifier chain.
 * Must be called from process context.
 *
 * Returns zero on success or %-ENOENT on failure.
 */
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
		struct notifier_block *n)
{
	int ret;

	/*
	 * This code gets used during boot-up, when task switching is
	 * not yet working and interrupts must remain disabled.  At
	 * such times we must not call mutex_lock().
	 */
	if (unlikely(system_state == SYSTEM_BOOTING))
		return notifier_chain_unregister(&nh->head, n);

	mutex_lock(&nh->mutex);
	ret = notifier_chain_unregister(&nh->head, n);
	mutex_unlock(&nh->mutex);
	synchronize_srcu(&nh->srcu);
	return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);

/**
 * srcu_notifier_call_chain - Call functions in an SRCU notifier chain
 * @nh: Pointer to head of the SRCU notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in a notifier chain in turn.  The functions
 * run in a process context, so they are allowed to block.
 *
 * If the return value of the notifier can be and'ed
 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
 * will return immediately, with the return value of
 * the notifier function which halted execution.
 * Otherwise the return value is the return value
 * of the last notifier function called.
 */
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
		unsigned long val, void *v)
{
	int ret;
	int idx;

	idx = srcu_read_lock(&nh->srcu);
	ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
	srcu_read_unlock(&nh->srcu, idx);
	return ret;
}
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);

/**
 * srcu_init_notifier_head - Initialize an SRCU notifier head
 * @nh: Pointer to head of the srcu notifier chain
 *
 * Unlike other sorts of notifier heads, SRCU notifier heads require
 * dynamic initialization.  Be sure to call this routine before
 * calling any of the other SRCU notifier routines for this head.
 *
 * If an SRCU notifier head is deallocated, it must first be cleaned
 * up by calling srcu_cleanup_notifier_head().  Otherwise the head's
 * per-cpu data (used by the SRCU mechanism) will leak.
 */
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
{
	mutex_init(&nh->mutex);
	if (init_srcu_struct(&nh->srcu) < 0)
		BUG();
	nh->head = NULL;
}
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
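
/*
 * Illustrative life cycle for a dynamically allocated SRCU head, not part
 * of this file; nh and some_nb here are hypothetical locals: initialize
 * before first use, clean up before freeing.
 *
 *	struct srcu_notifier_head *nh = kmalloc(sizeof(*nh), GFP_KERNEL);
 *
 *	srcu_init_notifier_head(nh);
 *	srcu_notifier_chain_register(nh, &some_nb);
 *	srcu_notifier_call_chain(nh, 0, NULL);
 *	srcu_notifier_chain_unregister(nh, &some_nb);
 *	srcu_cleanup_notifier_head(nh);
 *	kfree(nh);
 */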

static ATOMIC_NOTIFIER_HEAD(die_chain);

int notrace notify_die(enum die_val val, const char *str,
	       struct pt_regs *regs, long err, int trap, int sig)
{
	struct die_args args = {
		.regs	= regs,
		.str	= str,
		.err	= err,
		.trapnr	= trap,
		.signr	= sig,
	};

	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			   "notify_die called but RCU thinks we're quiescent");
	return atomic_notifier_call_chain(&die_chain, val, &args);
}
NOKPROBE_SYMBOL(notify_die);

int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(register_die_notifier);

int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&die_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_die_notifier);
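
/*
 * Illustrative sketch, not part of this file: a debugging module can
 * watch the die chain; the callback runs in atomic context.  demo_die_cb
 * and demo_die_nb are hypothetical names, and the available die_val
 * values (e.g. DIE_OOPS) come from the architecture's <asm/kdebug.h>.
 *
 *	static int demo_die_cb(struct notifier_block *nb,
 *			       unsigned long val, void *data)
 *	{
 *		struct die_args *args = data;
 *
 *		pr_emerg("die event %lu: %s\n", val, args->str);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block demo_die_nb = { .notifier_call = demo_die_cb };
 *
 *	register_die_notifier(&demo_die_nb);
 */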