// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <ccross@android.com>
 */
#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
	struct raw_notifier_head chain;
	raw_spinlock_t lock;
} cpu_pm_notifier = {
	.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
	.lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};
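
/*
 * Locking scheme, as visible in the code below: updaters (register and
 * unregister) take cpu_pm_notifier.lock with interrupts disabled, while the
 * plain notify path walks the chain under rcu_read_lock() so the idle task
 * never has to contend on the lock.
 */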

static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();

	return notifier_to_errno(ret);
}

static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);

	return notifier_to_errno(ret);
}
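
/*
 * A sketch of what the "robust" variant buys us: the chain is first walked
 * with @event_up; if a callback fails, the notifiers that already ran are
 * walked again with @event_down so they can undo their work. Keeping both
 * walks inside a single raw_spin_lock_irqsave() region guarantees the chain
 * cannot change between the up and down passes.
 */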

/**
 * cpu_pm_register_notifier - register a driver with cpu_pm
 * @nb: notifier block to register
 *
 * Add a driver to a list of drivers that are notified about
 * CPU and CPU cluster low power entry and exit.
 *
 * This function has the same return conditions as raw_notifier_chain_register.
 */
int cpu_pm_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
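
/*
 * Example (an illustrative sketch, not part of this file; the foo_* names
 * are hypothetical): a driver that saves its context before a CPU low power
 * state and restores it on exit or on a failed entry.
 *
 *	static int foo_cpu_pm_notify(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		switch (action) {
 *		case CPU_PM_ENTER:
 *			foo_save_context();
 *			break;
 *		case CPU_PM_ENTER_FAILED:
 *		case CPU_PM_EXIT:
 *			foo_restore_context();
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_pm_nb = {
 *		.notifier_call = foo_cpu_pm_notify,
 *	};
 *
 *	cpu_pm_register_notifier(&foo_cpu_pm_nb);
 */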

/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function has the same return conditions as raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the cpu to reset.
 *
 * Must be called on the affected CPU with interrupts disabled. The platform is
 * responsible for ensuring that cpu_pm_enter is not called twice on the same
 * CPU before cpu_pm_exit is called. Notified drivers can include VFP
 * co-processor, interrupt controller and its PM extensions, and local CPU
 * timers context save/restore, which shouldn't be interrupted. Hence it must
 * be called with interrupts disabled.
 *
 * Return conditions are the same as __raw_notifier_call_chain.
 */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the cpu to reset.
 *
 * Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, and local CPU timers context save/restore, which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are the same as __raw_notifier_call_chain.
 */
int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);
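
/*
 * Typical (illustrative) pairing in a platform idle path, with interrupts
 * already disabled; platform_enter_lowpower() is a hypothetical helper. Note
 * that on failure cpu_pm_notify_robust() has already issued
 * CPU_PM_ENTER_FAILED, so the caller must skip cpu_pm_exit():
 *
 *	ret = cpu_pm_enter();
 *	if (!ret) {
 *		platform_enter_lowpower();
 *		cpu_pm_exit();
 *	}
 */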

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all cpus in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all cpus in the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, and local CPU timers context save/restore, which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are the same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all cpus in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, and local CPU timers context save/restore, which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are the same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_exit(void)
{
	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
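
/*
 * Illustrative call ordering for a whole power domain, derived from the
 * constraints in the kernel-doc above (a sketch; the "last/first CPU"
 * bookkeeping is the platform's responsibility):
 *
 *	cpu_pm_enter();			// on every CPU in the domain
 *	cpu_cluster_pm_enter();		// once, after all cpu_pm_enter calls
 *
 *	... low power state ...
 *
 *	cpu_cluster_pm_exit();		// once, before any cpu_pm_exit
 *	cpu_pm_exit();			// on every CPU in the domain
 */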

#ifdef CONFIG_PM
static int cpu_pm_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	ret = cpu_cluster_pm_enter();
	return ret;
}

static void cpu_pm_resume(void)
{
	/* Undo cpu_pm_suspend() in reverse order: cluster first, then CPU. */
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
	.suspend = cpu_pm_suspend,
	.resume = cpu_pm_resume,
};

static int cpu_pm_init(void)
{
	register_syscore_ops(&cpu_pm_syscore_ops);
	return 0;
}
core_initcall(cpu_pm_init);
#endif