Merge branch 'acpi-dev-pm' into acpi-enumeration
Subsequent commits in this branch will depend on 'acpi-dev-pm' material.
commit 45c36462ae
@@ -204,3 +204,34 @@ Description:

		This attribute has no effect on system-wide suspend/resume and
		hibernation.

What:		/sys/devices/.../power/pm_qos_no_power_off
Date:		September 2012
Contact:	Rafael J. Wysocki <rjw@sisk.pl>
Description:
		The /sys/devices/.../power/pm_qos_no_power_off attribute
		is used for manipulating the PM QoS "no power off" flag.  If
		set, this flag indicates to the kernel that power should not
		be removed entirely from the device.

		Not all drivers support this attribute.  If it isn't supported,
		it is not present.

		This attribute has no effect on system-wide suspend/resume and
		hibernation.

What:		/sys/devices/.../power/pm_qos_remote_wakeup
Date:		September 2012
Contact:	Rafael J. Wysocki <rjw@sisk.pl>
Description:
		The /sys/devices/.../power/pm_qos_remote_wakeup attribute
		is used for manipulating the PM QoS "remote wakeup required"
		flag.  If set, this flag indicates to the kernel that the
		device is a source of user events that have to be signaled from
		its low-power states.

		Not all drivers support this attribute.  If it isn't supported,
		it is not present.

		This attribute has no effect on system-wide suspend/resume and
		hibernation.
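The two flags documented above are consumed in the kernel through dev_pm_qos_flags(), which is added later in this series. As a hedged illustration (the helper name below is made up; dev_pm_qos_flags() and PM_QOS_FLAG_NO_POWER_OFF are the interfaces from this series), a driver could honor the "no power off" request like this:

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical helper: report whether power may be removed completely.
 * With a single-bit mask, dev_pm_qos_flags() returns PM_QOS_FLAGS_ALL when
 * the flag is requested and PM_QOS_FLAGS_NONE/UNDEFINED otherwise. */
static bool my_dev_may_power_off(struct device *dev)
{
	return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) != PM_QOS_FLAGS_ALL;
}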
@@ -99,7 +99,7 @@ reading the aggregated value does not require any locking mechanism.

From kernel mode the use of this interface is the following:

int dev_pm_qos_add_request(device, handle, value):
int dev_pm_qos_add_request(device, handle, type, value):
Will insert an element into the list for that identified device with the
target value. Upon change to this list the new target is recomputed and any
registered notifiers are called only if the target value is now different.
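For reference, a minimal sketch of a caller adapted to the new signature (the request variables and function name are hypothetical; DEV_PM_QOS_LATENCY, DEV_PM_QOS_FLAGS and PM_QOS_FLAG_NO_POWER_OFF are defined by this series):

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request latency_req;	/* hypothetical, driver-owned */
static struct dev_pm_qos_request flags_req;

static int my_add_constraints(struct device *dev)
{
	int ret;

	/* Resume-latency constraint of 100 us (the pre-existing request type). */
	ret = dev_pm_qos_add_request(dev, &latency_req, DEV_PM_QOS_LATENCY, 100);
	if (ret < 0)
		return ret;

	/* New in this series: a flags request asking that power never be removed. */
	return dev_pm_qos_add_request(dev, &flags_req, DEV_PM_QOS_FLAGS,
				      PM_QOS_FLAG_NO_POWER_OFF);
}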
@@ -21,9 +21,10 @@ obj-y				+= acpi.o \
acpi-y				+= osl.o utils.o reboot.o
acpi-y				+= nvs.o

# sleep related files
# Power management related files
acpi-y				+= wakeup.o
acpi-y				+= sleep.o
acpi-$(CONFIG_PM)		+= device_pm.o
acpi-$(CONFIG_ACPI_SLEEP)	+= proc.o
@@ -257,7 +257,15 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state)
}


static int __acpi_bus_set_power(struct acpi_device *device, int state)
/**
 * acpi_device_set_power - Set power state of an ACPI device.
 * @device: Device to set the power state of.
 * @state: New power state to set.
 *
 * Callers must ensure that the device is power manageable before using this
 * function.
 */
int acpi_device_set_power(struct acpi_device *device, int state)
{
	int result = 0;
	acpi_status status = AE_OK;
@@ -298,6 +306,12 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
	 * a lower-powered state.
	 */
	if (state < device->power.state) {
		if (device->power.state >= ACPI_STATE_D3_HOT &&
		    state != ACPI_STATE_D0) {
			printk(KERN_WARNING PREFIX
			       "Cannot transition to non-D0 state from D3\n");
			return -ENODEV;
		}
		if (device->power.flags.power_resources) {
			result = acpi_power_transition(device, state);
			if (result)
@@ -341,6 +355,7 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)

	return result;
}
EXPORT_SYMBOL(acpi_device_set_power);


int acpi_bus_set_power(acpi_handle handle, int state)
@@ -359,7 +374,7 @@ int acpi_bus_set_power(acpi_handle handle, int state)
		return -ENODEV;
	}

	return __acpi_bus_set_power(device, state);
	return acpi_device_set_power(device, state);
}
EXPORT_SYMBOL(acpi_bus_set_power);

@@ -402,7 +417,7 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p)
	if (result)
		return result;

	result = __acpi_bus_set_power(device, state);
	result = acpi_device_set_power(device, state);
	if (!result && state_p)
		*state_p = state;

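acpi_device_set_power() is now exported, and its kernel-doc requires callers to make sure the device is power manageable first. A hedged sketch of such a caller (the wrapper name is invented; acpi_device_power_manageable() is the existing helper for that check):

#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>

/* Hypothetical caller: put an ACPI device node into D3hot, honoring the
 * "power manageable" precondition documented above. */
static int my_put_in_d3hot(struct acpi_device *adev)
{
	if (!acpi_device_power_manageable(adev))
		return 0;	/* nothing to do for this device */

	return acpi_device_set_power(adev, ACPI_STATE_D3_HOT);
}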
drivers/acpi/device_pm.c (new file, 668 lines)
@@ -0,0 +1,668 @@
|
||||
/*
|
||||
* drivers/acpi/device_pm.c - ACPI device power management routines.
|
||||
*
|
||||
* Copyright (C) 2012, Intel Corp.
|
||||
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
|
||||
*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along
|
||||
* with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
|
||||
*
|
||||
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include <acpi/acpi.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
|
||||
static DEFINE_MUTEX(acpi_pm_notifier_lock);
|
||||
|
||||
/**
|
||||
* acpi_add_pm_notifier - Register PM notifier for given ACPI device.
|
||||
* @adev: ACPI device to add the notifier for.
|
||||
* @context: Context information to pass to the notifier routine.
|
||||
*
|
||||
* NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
|
||||
* PM wakeup events. For example, wakeup events may be generated for bridges
|
||||
* if one of the devices below the bridge is signaling wakeup, even if the
|
||||
* bridge itself doesn't have a wakeup GPE associated with it.
|
||||
*/
|
||||
acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
|
||||
acpi_notify_handler handler, void *context)
|
||||
{
|
||||
acpi_status status = AE_ALREADY_EXISTS;
|
||||
|
||||
mutex_lock(&acpi_pm_notifier_lock);
|
||||
|
||||
if (adev->wakeup.flags.notifier_present)
|
||||
goto out;
|
||||
|
||||
status = acpi_install_notify_handler(adev->handle,
|
||||
ACPI_SYSTEM_NOTIFY,
|
||||
handler, context);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto out;
|
||||
|
||||
adev->wakeup.flags.notifier_present = true;
|
||||
|
||||
out:
|
||||
mutex_unlock(&acpi_pm_notifier_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
|
||||
* @adev: ACPI device to remove the notifier from.
|
||||
*/
|
||||
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
|
||||
acpi_notify_handler handler)
|
||||
{
|
||||
acpi_status status = AE_BAD_PARAMETER;
|
||||
|
||||
mutex_lock(&acpi_pm_notifier_lock);
|
||||
|
||||
if (!adev->wakeup.flags.notifier_present)
|
||||
goto out;
|
||||
|
||||
status = acpi_remove_notify_handler(adev->handle,
|
||||
ACPI_SYSTEM_NOTIFY,
|
||||
handler);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto out;
|
||||
|
||||
adev->wakeup.flags.notifier_present = false;
|
||||
|
||||
out:
|
||||
mutex_unlock(&acpi_pm_notifier_lock);
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_device_power_state - Get preferred power state of ACPI device.
|
||||
* @dev: Device whose preferred target power state to return.
|
||||
* @adev: ACPI device node corresponding to @dev.
|
||||
* @target_state: System state to match the resultant device state.
|
||||
* @d_max_in: Deepest low-power state to take into consideration.
|
||||
* @d_min_p: Location to store the upper limit of the allowed states range.
|
||||
* Return value: Preferred power state of the device on success, -ENODEV
|
||||
* (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
|
||||
*
|
||||
* Find the lowest power (highest number) ACPI device power state that the
|
||||
* device can be in while the system is in the state represented by
|
||||
* @target_state. If @d_min_p is set, the highest power (lowest number) device
|
||||
* power state that @dev can be in for the given system sleep state is stored
|
||||
* at the location pointed to by it.
|
||||
*
|
||||
* Callers must ensure that @dev and @adev are valid pointers and that @adev
|
||||
* actually corresponds to @dev before using this function.
|
||||
*/
|
||||
int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
|
||||
u32 target_state, int d_max_in, int *d_min_p)
|
||||
{
|
||||
char acpi_method[] = "_SxD";
|
||||
unsigned long long d_min, d_max;
|
||||
bool wakeup = false;
|
||||
|
||||
if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
|
||||
return -EINVAL;
|
||||
|
||||
if (d_max_in > ACPI_STATE_D3_HOT) {
|
||||
enum pm_qos_flags_status stat;
|
||||
|
||||
stat = dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
|
||||
if (stat == PM_QOS_FLAGS_ALL)
|
||||
d_max_in = ACPI_STATE_D3_HOT;
|
||||
}
|
||||
|
||||
acpi_method[2] = '0' + target_state;
|
||||
/*
|
||||
* If the sleep state is S0, the lowest limit from ACPI is D3,
|
||||
* but if the device has _S0W, we will use the value from _S0W
|
||||
* as the lowest limit from ACPI. Finally, we will constrain
|
||||
* the lowest limit with the specified one.
|
||||
*/
|
||||
d_min = ACPI_STATE_D0;
|
||||
d_max = ACPI_STATE_D3;
|
||||
|
||||
/*
|
||||
* If present, _SxD methods return the minimum D-state (highest power
|
||||
* state) we can use for the corresponding S-states. Otherwise, the
|
||||
* minimum D-state is D0 (ACPI 3.x).
|
||||
*
|
||||
* NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
|
||||
* provided -- that's our fault recovery, we ignore retval.
|
||||
*/
|
||||
if (target_state > ACPI_STATE_S0) {
|
||||
acpi_evaluate_integer(adev->handle, acpi_method, NULL, &d_min);
|
||||
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
|
||||
&& adev->wakeup.sleep_state >= target_state;
|
||||
} else if (dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) !=
|
||||
PM_QOS_FLAGS_NONE) {
|
||||
wakeup = adev->wakeup.flags.valid;
|
||||
}
|
||||
|
||||
/*
|
||||
* If _PRW says we can wake up the system from the target sleep state,
|
||||
* the D-state returned by _SxD is sufficient for that (we assume a
|
||||
* wakeup-aware driver if wake is set). Still, if _SxW exists
|
||||
* (ACPI 3.x), it should return the maximum (lowest power) D-state that
|
||||
* can wake the system. _S0W may be valid, too.
|
||||
*/
|
||||
if (wakeup) {
|
||||
acpi_status status;
|
||||
|
||||
acpi_method[3] = 'W';
|
||||
status = acpi_evaluate_integer(adev->handle, acpi_method, NULL,
|
||||
&d_max);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (target_state != ACPI_STATE_S0 ||
|
||||
status != AE_NOT_FOUND)
|
||||
d_max = d_min;
|
||||
} else if (d_max < d_min) {
|
||||
/* Warn the user of the broken DSDT */
|
||||
printk(KERN_WARNING "ACPI: Wrong value from %s\n",
|
||||
acpi_method);
|
||||
/* Sanitize it */
|
||||
d_min = d_max;
|
||||
}
|
||||
}
|
||||
|
||||
if (d_max_in < d_min)
|
||||
return -EINVAL;
|
||||
if (d_min_p)
|
||||
*d_min_p = d_min;
|
||||
/* constrain d_max with specified lowest limit (max number) */
|
||||
if (d_max > d_max_in) {
|
||||
for (d_max = d_max_in; d_max > d_min; d_max--) {
|
||||
if (adev->power.states[d_max].flags.valid)
|
||||
break;
|
||||
}
|
||||
}
|
||||
return d_max;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_device_power_state);
|
||||
|
||||
/**
|
||||
* acpi_pm_device_sleep_state - Get preferred power state of ACPI device.
|
||||
* @dev: Device whose preferred target power state to return.
|
||||
* @d_min_p: Location to store the upper limit of the allowed states range.
|
||||
* @d_max_in: Deepest low-power state to take into consideration.
|
||||
* Return value: Preferred power state of the device on success, -ENODEV
|
||||
* (if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
|
||||
*
|
||||
* The caller must ensure that @dev is valid before using this function.
|
||||
*/
|
||||
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
|
||||
{
|
||||
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
|
||||
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return acpi_device_power_state(dev, adev, acpi_target_system_state(),
|
||||
d_max_in, d_min_p);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
|
||||
|
||||
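A brief usage sketch for acpi_pm_device_sleep_state() as a bus type or driver might call it from its suspend path (the helper name is hypothetical; the ACPI_STATE_D3 upper bound mirrors the limit checks above):

#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>	/* assumed location of the prototype in this era */

/* Hypothetical suspend-path helper: ask ACPI for the target D-state of this
 * device for the current system sleep transition. */
static int my_choose_suspend_state(struct device *dev)
{
	int d_min;
	int d_max = acpi_pm_device_sleep_state(dev, &d_min, ACPI_STATE_D3);

	if (d_max < 0)
		return d_max;	/* no ACPI companion, or invalid limits */

	/* Program the device for d_max; d_min is the shallowest state allowed. */
	return d_max;
}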
#ifdef CONFIG_PM_RUNTIME
|
||||
/**
|
||||
* acpi_wakeup_device - Wakeup notification handler for ACPI devices.
|
||||
* @handle: ACPI handle of the device the notification is for.
|
||||
* @event: Type of the signaled event.
|
||||
* @context: Device corresponding to @handle.
|
||||
*/
|
||||
static void acpi_wakeup_device(acpi_handle handle, u32 event, void *context)
|
||||
{
|
||||
struct device *dev = context;
|
||||
|
||||
if (event == ACPI_NOTIFY_DEVICE_WAKE && dev) {
|
||||
pm_wakeup_event(dev, 0);
|
||||
pm_runtime_resume(dev);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* __acpi_device_run_wake - Enable/disable runtime remote wakeup for device.
|
||||
* @adev: ACPI device to enable/disable the remote wakeup for.
|
||||
* @enable: Whether to enable or disable the wakeup functionality.
|
||||
*
|
||||
* Enable/disable the GPE associated with @adev so that it can generate
|
||||
* wakeup signals for the device in response to external (remote) events and
|
||||
* enable/disable device wakeup power.
|
||||
*
|
||||
* Callers must ensure that @adev is a valid ACPI device node before executing
|
||||
* this function.
|
||||
*/
|
||||
int __acpi_device_run_wake(struct acpi_device *adev, bool enable)
|
||||
{
|
||||
struct acpi_device_wakeup *wakeup = &adev->wakeup;
|
||||
|
||||
if (enable) {
|
||||
acpi_status res;
|
||||
int error;
|
||||
|
||||
error = acpi_enable_wakeup_device_power(adev, ACPI_STATE_S0);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
|
||||
if (ACPI_FAILURE(res)) {
|
||||
acpi_disable_wakeup_device_power(adev);
|
||||
return -EIO;
|
||||
}
|
||||
} else {
|
||||
acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
|
||||
acpi_disable_wakeup_device_power(adev);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_pm_device_run_wake - Enable/disable remote wakeup for given device.
|
||||
* @phys_dev: Device to enable/disable platform wakeup for.
|
||||
* @enable: Whether to enable or disable the wakeup functionality.
|
||||
*/
|
||||
int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
|
||||
{
|
||||
struct acpi_device *adev;
|
||||
acpi_handle handle;
|
||||
|
||||
if (!device_run_wake(phys_dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(phys_dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
|
||||
dev_dbg(phys_dev, "ACPI handle without context in %s!\n",
|
||||
__func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return __acpi_device_run_wake(adev, enable);
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_pm_device_run_wake);
|
||||
#else
|
||||
static inline void acpi_wakeup_device(acpi_handle handle, u32 event,
|
||||
void *context) {}
|
||||
#endif /* CONFIG_PM_RUNTIME */
|
||||
|
||||
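As an illustration of the run-wake helpers above, a bus type could arm ACPI remote wakeup around runtime suspend roughly like this (the callback names are hypothetical; device_run_wake() and acpi_pm_device_run_wake() are the real interfaces used here):

#include <linux/pm_wakeup.h>
#include <acpi/acpi_bus.h>	/* assumed location of acpi_pm_device_run_wake() */

/* Hypothetical runtime PM glue: arm ACPI remote wakeup while suspended. */
static int my_runtime_suspend(struct device *dev)
{
	if (device_run_wake(dev))
		acpi_pm_device_run_wake(dev, true);
	/* ...then put the device into a low-power state... */
	return 0;
}

static int my_runtime_resume(struct device *dev)
{
	/* ...bring the device back to full power first... */
	acpi_pm_device_run_wake(dev, false);
	return 0;
}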
#ifdef CONFIG_PM_SLEEP
|
||||
/**
|
||||
* __acpi_device_sleep_wake - Enable or disable device to wake up the system.
|
||||
* @adev: ACPI device to enable/disable to wake up the system.
|
||||
* @target_state: System state the device is supposed to wake up from.
|
||||
* @enable: Whether to enable or disable @dev to wake up the system.
|
||||
*/
|
||||
int __acpi_device_sleep_wake(struct acpi_device *adev, u32 target_state,
|
||||
bool enable)
|
||||
{
|
||||
return enable ?
|
||||
acpi_enable_wakeup_device_power(adev, target_state) :
|
||||
acpi_disable_wakeup_device_power(adev);
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_pm_device_sleep_wake - Enable or disable device to wake up the system.
|
||||
* @dev: Device to enable/disable to wake up the system from sleep states.
|
||||
* @enable: Whether to enable or disable @dev to wake up the system.
|
||||
*/
|
||||
int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
|
||||
{
|
||||
acpi_handle handle;
|
||||
struct acpi_device *adev;
|
||||
int error;
|
||||
|
||||
if (!device_can_wakeup(dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
|
||||
dev_dbg(dev, "ACPI handle without context in %s!\n", __func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
error = __acpi_device_sleep_wake(adev, acpi_target_system_state(),
|
||||
enable);
|
||||
if (!error)
|
||||
dev_info(dev, "System wakeup %s by ACPI\n",
|
||||
enable ? "enabled" : "disabled");
|
||||
|
||||
return error;
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_get_node - Get ACPI device node for the given physical device.
|
||||
* @dev: Device to get the ACPI node for.
|
||||
*/
|
||||
static struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
|
||||
{
|
||||
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
|
||||
struct acpi_device *adev;
|
||||
|
||||
return handle && ACPI_SUCCESS(acpi_bus_get_device(handle, &adev)) ?
|
||||
adev : NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
|
||||
* @dev: Device to put into a low-power state.
|
||||
* @adev: ACPI device node corresponding to @dev.
|
||||
* @system_state: System state to choose the device state for.
|
||||
*/
|
||||
static int acpi_dev_pm_low_power(struct device *dev, struct acpi_device *adev,
|
||||
u32 system_state)
|
||||
{
|
||||
int power_state;
|
||||
|
||||
if (!acpi_device_power_manageable(adev))
|
||||
return 0;
|
||||
|
||||
power_state = acpi_device_power_state(dev, adev, system_state,
|
||||
ACPI_STATE_D3, NULL);
|
||||
if (power_state < ACPI_STATE_D0 || power_state > ACPI_STATE_D3)
|
||||
return -EIO;
|
||||
|
||||
return acpi_device_set_power(adev, power_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_full_power - Put ACPI device into the full-power state.
|
||||
* @adev: ACPI device node to put into the full-power state.
|
||||
*/
|
||||
static int acpi_dev_pm_full_power(struct acpi_device *adev)
|
||||
{
|
||||
return acpi_device_power_manageable(adev) ?
|
||||
acpi_device_set_power(adev, ACPI_STATE_D0) : 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
/**
|
||||
* acpi_dev_runtime_suspend - Put device into a low-power state using ACPI.
|
||||
* @dev: Device to put into a low-power state.
|
||||
*
|
||||
* Put the given device into a runtime low-power state using the standard ACPI
|
||||
* mechanism. Set up remote wakeup if desired, choose the state to put the
|
||||
* device into (this checks if remote wakeup is expected to work too), and set
|
||||
* the power state of the device.
|
||||
*/
|
||||
int acpi_dev_runtime_suspend(struct device *dev)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
bool remote_wakeup;
|
||||
int error;
|
||||
|
||||
if (!adev)
|
||||
return 0;
|
||||
|
||||
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
|
||||
PM_QOS_FLAGS_NONE;
|
||||
error = __acpi_device_run_wake(adev, remote_wakeup);
|
||||
if (remote_wakeup && error)
|
||||
return -EAGAIN;
|
||||
|
||||
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
|
||||
if (error)
|
||||
__acpi_device_run_wake(adev, false);
|
||||
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_runtime_suspend);
|
||||
|
||||
/**
|
||||
* acpi_dev_runtime_resume - Put device into the full-power state using ACPI.
|
||||
* @dev: Device to put into the full-power state.
|
||||
*
|
||||
* Put the given device into the full-power state using the standard ACPI
|
||||
* mechanism at run time. Set the power state of the device to ACPI D0 and
|
||||
* disable remote wakeup.
|
||||
*/
|
||||
int acpi_dev_runtime_resume(struct device *dev)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
int error;
|
||||
|
||||
if (!adev)
|
||||
return 0;
|
||||
|
||||
error = acpi_dev_pm_full_power(adev);
|
||||
__acpi_device_run_wake(adev, false);
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
|
||||
|
||||
/**
|
||||
* acpi_subsys_runtime_suspend - Suspend device using ACPI.
|
||||
* @dev: Device to suspend.
|
||||
*
|
||||
* Carry out the generic runtime suspend procedure for @dev and use ACPI to put
|
||||
* it into a runtime low-power state.
|
||||
*/
|
||||
int acpi_subsys_runtime_suspend(struct device *dev)
|
||||
{
|
||||
int ret = pm_generic_runtime_suspend(dev);
|
||||
return ret ? ret : acpi_dev_runtime_suspend(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_suspend);
|
||||
|
||||
/**
|
||||
* acpi_subsys_runtime_resume - Resume device using ACPI.
|
||||
* @dev: Device to resume.
|
||||
*
|
||||
* Use ACPI to put the given device into the full-power state and carry out the
|
||||
* generic runtime resume procedure for it.
|
||||
*/
|
||||
int acpi_subsys_runtime_resume(struct device *dev)
|
||||
{
|
||||
int ret = acpi_dev_runtime_resume(dev);
|
||||
return ret ? ret : pm_generic_runtime_resume(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_runtime_resume);
|
||||
#endif /* CONFIG_PM_RUNTIME */
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/**
|
||||
* acpi_dev_suspend_late - Put device into a low-power state using ACPI.
|
||||
* @dev: Device to put into a low-power state.
|
||||
*
|
||||
* Put the given device into a low-power state during system transition to a
|
||||
* sleep state using the standard ACPI mechanism. Set up system wakeup if
|
||||
* desired, choose the state to put the device into (this checks if system
|
||||
* wakeup is expected to work too), and set the power state of the device.
|
||||
*/
|
||||
int acpi_dev_suspend_late(struct device *dev)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
u32 target_state;
|
||||
bool wakeup;
|
||||
int error;
|
||||
|
||||
if (!adev)
|
||||
return 0;
|
||||
|
||||
target_state = acpi_target_system_state();
|
||||
wakeup = device_may_wakeup(dev);
|
||||
error = __acpi_device_sleep_wake(adev, target_state, wakeup);
|
||||
if (wakeup && error)
|
||||
return error;
|
||||
|
||||
error = acpi_dev_pm_low_power(dev, adev, target_state);
|
||||
if (error)
|
||||
__acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
|
||||
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_suspend_late);
|
||||
|
||||
/**
|
||||
* acpi_dev_resume_early - Put device into the full-power state using ACPI.
|
||||
* @dev: Device to put into the full-power state.
|
||||
*
|
||||
* Put the given device into the full-power state using the standard ACPI
|
||||
* mechanism during system transition to the working state. Set the power
|
||||
* state of the device to ACPI D0 and disable remote wakeup.
|
||||
*/
|
||||
int acpi_dev_resume_early(struct device *dev)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
int error;
|
||||
|
||||
if (!adev)
|
||||
return 0;
|
||||
|
||||
error = acpi_dev_pm_full_power(adev);
|
||||
__acpi_device_sleep_wake(adev, ACPI_STATE_UNKNOWN, false);
|
||||
return error;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
|
||||
|
||||
/**
|
||||
* acpi_subsys_prepare - Prepare device for system transition to a sleep state.
|
||||
* @dev: Device to prepare.
|
||||
*/
|
||||
int acpi_subsys_prepare(struct device *dev)
|
||||
{
|
||||
/*
|
||||
* Follow PCI and resume devices suspended at run time before running
|
||||
* their system suspend callbacks.
|
||||
*/
|
||||
pm_runtime_resume(dev);
|
||||
return pm_generic_prepare(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
|
||||
|
||||
/**
|
||||
* acpi_subsys_suspend_late - Suspend device using ACPI.
|
||||
* @dev: Device to suspend.
|
||||
*
|
||||
* Carry out the generic late suspend procedure for @dev and use ACPI to put
|
||||
* it into a low-power state during system transition into a sleep state.
|
||||
*/
|
||||
int acpi_subsys_suspend_late(struct device *dev)
|
||||
{
|
||||
int ret = pm_generic_suspend_late(dev);
|
||||
return ret ? ret : acpi_dev_suspend_late(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late);
|
||||
|
||||
/**
|
||||
* acpi_subsys_resume_early - Resume device using ACPI.
|
||||
* @dev: Device to resume.
|
||||
*
|
||||
* Use ACPI to put the given device into the full-power state and carry out the
|
||||
* generic early resume procedure for it during system transition into the
|
||||
* working state.
|
||||
*/
|
||||
int acpi_subsys_resume_early(struct device *dev)
|
||||
{
|
||||
int ret = acpi_dev_resume_early(dev);
|
||||
return ret ? ret : pm_generic_resume_early(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
static struct dev_pm_domain acpi_general_pm_domain = {
|
||||
.ops = {
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
.runtime_suspend = acpi_subsys_runtime_suspend,
|
||||
.runtime_resume = acpi_subsys_runtime_resume,
|
||||
.runtime_idle = pm_generic_runtime_idle,
|
||||
#endif
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.prepare = acpi_subsys_prepare,
|
||||
.suspend_late = acpi_subsys_suspend_late,
|
||||
.resume_early = acpi_subsys_resume_early,
|
||||
.poweroff_late = acpi_subsys_suspend_late,
|
||||
.restore_early = acpi_subsys_resume_early,
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_attach - Prepare device for ACPI power management.
|
||||
* @dev: Device to prepare.
|
||||
* @power_on: Whether or not to power on the device.
|
||||
*
|
||||
* If @dev has a valid ACPI handle that has a valid struct acpi_device object
|
||||
* attached to it, install a wakeup notification handler for the device and
|
||||
* add it to the general ACPI PM domain. If @power_on is set, the device will
|
||||
* be put into the ACPI D0 state before the function returns.
|
||||
*
|
||||
* This assumes that the @dev's bus type uses generic power management callbacks
|
||||
* (or doesn't use any power management callbacks at all).
|
||||
*
|
||||
* Callers must ensure proper synchronization of this function with power
|
||||
* management callbacks.
|
||||
*/
|
||||
int acpi_dev_pm_attach(struct device *dev, bool power_on)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
|
||||
if (!adev)
|
||||
return -ENODEV;
|
||||
|
||||
if (dev->pm_domain)
|
||||
return -EEXIST;
|
||||
|
||||
acpi_add_pm_notifier(adev, acpi_wakeup_device, dev);
|
||||
dev->pm_domain = &acpi_general_pm_domain;
|
||||
if (power_on) {
|
||||
acpi_dev_pm_full_power(adev);
|
||||
__acpi_device_run_wake(adev, false);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_attach);
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_detach - Remove ACPI power management from the device.
|
||||
* @dev: Device to take care of.
|
||||
* @power_off: Whether or not to try to remove power from the device.
|
||||
*
|
||||
* Remove the device from the general ACPI PM domain and remove its wakeup
|
||||
* notifier. If @power_off is set, additionally remove power from the device if
|
||||
* possible.
|
||||
*
|
||||
* Callers must ensure proper synchronization of this function with power
|
||||
* management callbacks.
|
||||
*/
|
||||
void acpi_dev_pm_detach(struct device *dev, bool power_off)
|
||||
{
|
||||
struct acpi_device *adev = acpi_dev_pm_get_node(dev);
|
||||
|
||||
if (adev && dev->pm_domain == &acpi_general_pm_domain) {
|
||||
dev->pm_domain = NULL;
|
||||
acpi_remove_pm_notifier(adev, acpi_wakeup_device);
|
||||
if (power_off) {
|
||||
/*
|
||||
* If the device's PM QoS resume latency limit or flags
|
||||
* have been exposed to user space, they have to be
|
||||
* hidden at this point, so that they don't affect the
|
||||
* choice of the low-power state to put the device into.
|
||||
*/
|
||||
dev_pm_qos_hide_latency_limit(dev);
|
||||
dev_pm_qos_hide_flags(dev);
|
||||
__acpi_device_run_wake(adev, false);
|
||||
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
|
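The attach/detach pair above is what a bus type or platform driver is expected to call to join the general ACPI PM domain. A hedged sketch (only the acpi_dev_pm_* calls come from this file; every other name is made up):

#include <linux/acpi.h>		/* assumed location of the declarations */
#include <linux/platform_device.h>

static int my_platform_probe(struct platform_device *pdev)
{
	int ret;

	/* Power the device up and route PM callbacks through the ACPI domain.
	 * -ENODEV here just means the device has no ACPI companion. */
	ret = acpi_dev_pm_attach(&pdev->dev, true);
	if (ret && ret != -ENODEV)
		return ret;

	/* ...normal probe work... */
	return 0;
}

static int my_platform_remove(struct platform_device *pdev)
{
	/* Drop out of the ACPI PM domain and allow power to be removed. */
	acpi_dev_pm_detach(&pdev->dev, true);
	return 0;
}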
@@ -1006,8 +1006,10 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
		 * D3hot is only valid if _PR3 present.
		 */
		if (ps->resources.count ||
		    (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT))
		    (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) {
			ps->flags.valid = 1;
			ps->flags.os_accessible = 1;
		}

		ps->power = -1;		/* Unknown - driver assigned */
		ps->latency = -1;	/* Unknown - driver assigned */
@@ -1023,6 +1025,11 @@ static int acpi_bus_get_power_flags(struct acpi_device *device)
	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
		device->power.states[ACPI_STATE_D3_COLD].flags.explicit_set = 1;

	/* Presence of _PS3 or _PRx means we can put the device into D3 cold */
	if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set ||
	    device->power.flags.power_resources)
		device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;

	acpi_bus_init_power(device);

	return 0;
@@ -18,7 +18,6 @@
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include <asm/io.h>

@@ -81,6 +80,12 @@ static int acpi_sleep_prepare(u32 acpi_state)

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}

static bool pwr_btn_event_pending;

/*
@ -681,177 +686,6 @@ int acpi_suspend(u32 acpi_state)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
/**
|
||||
* acpi_pm_device_sleep_state - return preferred power state of ACPI device
|
||||
* in the system sleep state given by %acpi_target_sleep_state
|
||||
* @dev: device to examine; its driver model wakeup flags control
|
||||
* whether it should be able to wake up the system
|
||||
* @d_min_p: used to store the upper limit of allowed states range
|
||||
* @d_max_in: specify the lowest allowed states
|
||||
* Return value: preferred power state of the device on success, -ENODEV
|
||||
* (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
|
||||
*
|
||||
* Find the lowest power (highest number) ACPI device power state that
|
||||
* device @dev can be in while the system is in the sleep state represented
|
||||
* by %acpi_target_sleep_state. If @wake is nonzero, the device should be
|
||||
* able to wake up the system from this sleep state. If @d_min_p is set,
|
||||
* the highest power (lowest number) device power state of @dev allowed
|
||||
* in this system sleep state is stored at the location pointed to by it.
|
||||
*
|
||||
* The caller must ensure that @dev is valid before using this function.
|
||||
* The caller is also responsible for figuring out if the device is
|
||||
* supposed to be able to wake up the system and passing this information
|
||||
* via @wake.
|
||||
*/
|
||||
|
||||
int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
|
||||
{
|
||||
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
|
||||
struct acpi_device *adev;
|
||||
char acpi_method[] = "_SxD";
|
||||
unsigned long long d_min, d_max;
|
||||
|
||||
if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
|
||||
return -EINVAL;
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
|
||||
printk(KERN_DEBUG "ACPI handle has no context!\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
acpi_method[2] = '0' + acpi_target_sleep_state;
|
||||
/*
|
||||
* If the sleep state is S0, the lowest limit from ACPI is D3,
|
||||
* but if the device has _S0W, we will use the value from _S0W
|
||||
* as the lowest limit from ACPI. Finally, we will constrain
|
||||
* the lowest limit with the specified one.
|
||||
*/
|
||||
d_min = ACPI_STATE_D0;
|
||||
d_max = ACPI_STATE_D3;
|
||||
|
||||
/*
|
||||
* If present, _SxD methods return the minimum D-state (highest power
|
||||
* state) we can use for the corresponding S-states. Otherwise, the
|
||||
* minimum D-state is D0 (ACPI 3.x).
|
||||
*
|
||||
* NOTE: We rely on acpi_evaluate_integer() not clobbering the integer
|
||||
* provided -- that's our fault recovery, we ignore retval.
|
||||
*/
|
||||
if (acpi_target_sleep_state > ACPI_STATE_S0)
|
||||
acpi_evaluate_integer(handle, acpi_method, NULL, &d_min);
|
||||
|
||||
/*
|
||||
* If _PRW says we can wake up the system from the target sleep state,
|
||||
* the D-state returned by _SxD is sufficient for that (we assume a
|
||||
* wakeup-aware driver if wake is set). Still, if _SxW exists
|
||||
* (ACPI 3.x), it should return the maximum (lowest power) D-state that
|
||||
* can wake the system. _S0W may be valid, too.
|
||||
*/
|
||||
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
|
||||
(device_may_wakeup(dev) && adev->wakeup.flags.valid &&
|
||||
adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
|
||||
acpi_status status;
|
||||
|
||||
acpi_method[3] = 'W';
|
||||
status = acpi_evaluate_integer(handle, acpi_method, NULL,
|
||||
&d_max);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
if (acpi_target_sleep_state != ACPI_STATE_S0 ||
|
||||
status != AE_NOT_FOUND)
|
||||
d_max = d_min;
|
||||
} else if (d_max < d_min) {
|
||||
/* Warn the user of the broken DSDT */
|
||||
printk(KERN_WARNING "ACPI: Wrong value from %s\n",
|
||||
acpi_method);
|
||||
/* Sanitize it */
|
||||
d_min = d_max;
|
||||
}
|
||||
}
|
||||
|
||||
if (d_max_in < d_min)
|
||||
return -EINVAL;
|
||||
if (d_min_p)
|
||||
*d_min_p = d_min;
|
||||
/* constrain d_max with specified lowest limit (max number) */
|
||||
if (d_max > d_max_in) {
|
||||
for (d_max = d_max_in; d_max > d_min; d_max--) {
|
||||
if (adev->power.states[d_max].flags.valid)
|
||||
break;
|
||||
}
|
||||
}
|
||||
return d_max;
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
/**
|
||||
* acpi_pm_device_run_wake - Enable/disable wake-up for given device.
|
||||
* @phys_dev: Device to enable/disable the platform to wake-up the system for.
|
||||
* @enable: Whether enable or disable the wake-up functionality.
|
||||
*
|
||||
* Find the ACPI device object corresponding to @pci_dev and try to
|
||||
* enable/disable the GPE associated with it.
|
||||
*/
|
||||
int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
|
||||
{
|
||||
struct acpi_device *dev;
|
||||
acpi_handle handle;
|
||||
|
||||
if (!device_run_wake(phys_dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(phys_dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &dev))) {
|
||||
dev_dbg(phys_dev, "ACPI handle has no context in %s!\n",
|
||||
__func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (enable) {
|
||||
acpi_enable_wakeup_device_power(dev, ACPI_STATE_S0);
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
} else {
|
||||
acpi_disable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number);
|
||||
acpi_disable_wakeup_device_power(dev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(acpi_pm_device_run_wake);
|
||||
|
||||
/**
|
||||
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
|
||||
* capability of given device
|
||||
* @dev: device to handle
|
||||
* @enable: 'true' - enable, 'false' - disable the wake-up capability
|
||||
*/
|
||||
int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
|
||||
{
|
||||
acpi_handle handle;
|
||||
struct acpi_device *adev;
|
||||
int error;
|
||||
|
||||
if (!device_can_wakeup(dev))
|
||||
return -EINVAL;
|
||||
|
||||
handle = DEVICE_ACPI_HANDLE(dev);
|
||||
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
|
||||
dev_dbg(dev, "ACPI handle has no context in %s!\n", __func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
error = enable ?
|
||||
acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
|
||||
acpi_disable_wakeup_device_power(adev);
|
||||
if (!error)
|
||||
dev_info(dev, "wake-up capability %s by ACPI\n",
|
||||
enable ? "enabled" : "disabled");
|
||||
|
||||
return error;
|
||||
}
|
||||
#endif /* CONFIG_PM_SLEEP */
|
||||
|
||||
static void acpi_power_off_prepare(void)
|
||||
{
|
||||
/* Prepare to power off the system */
|
||||
|
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
		return -EBUSY;

	not_suspended = 0;
	list_for_each_entry(pdd, &genpd->dev_list, list_node)
	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
		    || pdd->dev->power.irq_safe))
			not_suspended++;
	}

	if (not_suspended > genpd->in_progress)
		return -EBUSY;

@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
extern int pm_qos_sysfs_add(struct device *dev);
extern void pm_qos_sysfs_remove(struct device *dev);
extern int pm_qos_sysfs_add_latency(struct device *dev);
extern void pm_qos_sysfs_remove_latency(struct device *dev);
extern int pm_qos_sysfs_add_flags(struct device *dev);
extern void pm_qos_sysfs_remove_flags(struct device *dev);

#else /* CONFIG_PM */

@ -40,6 +40,7 @@
|
||||
#include <linux/device.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
|
||||
#include "power.h"
|
||||
|
||||
@ -47,6 +48,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
|
||||
|
||||
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
|
||||
|
||||
/**
|
||||
* __dev_pm_qos_flags - Check PM QoS flags for a given device.
|
||||
* @dev: Device to check the PM QoS flags for.
|
||||
* @mask: Flags to check against.
|
||||
*
|
||||
* This routine must be called with dev->power.lock held.
|
||||
*/
|
||||
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
|
||||
{
|
||||
struct dev_pm_qos *qos = dev->power.qos;
|
||||
struct pm_qos_flags *pqf;
|
||||
s32 val;
|
||||
|
||||
if (!qos)
|
||||
return PM_QOS_FLAGS_UNDEFINED;
|
||||
|
||||
pqf = &qos->flags;
|
||||
if (list_empty(&pqf->list))
|
||||
return PM_QOS_FLAGS_UNDEFINED;
|
||||
|
||||
val = pqf->effective_flags & mask;
|
||||
if (val)
|
||||
return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
|
||||
|
||||
return PM_QOS_FLAGS_NONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
|
||||
* @dev: Device to check the PM QoS flags for.
|
||||
* @mask: Flags to check against.
|
||||
*/
|
||||
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
|
||||
{
|
||||
unsigned long irqflags;
|
||||
enum pm_qos_flags_status ret;
|
||||
|
||||
spin_lock_irqsave(&dev->power.lock, irqflags);
|
||||
ret = __dev_pm_qos_flags(dev, mask);
|
||||
spin_unlock_irqrestore(&dev->power.lock, irqflags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
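To make the ALL/SOME/NONE semantics above concrete, here is an illustrative check with a multi-bit mask, in the spirit of the generic PM domains change later in this commit (the helper name is invented):

#include <linux/device.h>
#include <linux/pm_qos.h>

static bool my_domain_may_power_off(struct device *dev)
{
	enum pm_qos_flags_status stat =
		dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF
					| PM_QOS_FLAG_REMOTE_WAKEUP);

	/* SOME means only part of the mask is requested, ALL means all of it;
	 * either one should block removing power here. */
	return stat <= PM_QOS_FLAGS_NONE;
}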
/**
|
||||
* __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
|
||||
* @dev: Device to get the PM QoS constraint value for.
|
||||
@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
|
||||
*/
|
||||
s32 __dev_pm_qos_read_value(struct device *dev)
|
||||
{
|
||||
struct pm_qos_constraints *c = dev->power.constraints;
|
||||
|
||||
return c ? pm_qos_read_value(c) : 0;
|
||||
return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* apply_constraint
|
||||
* @req: constraint request to apply
|
||||
* @action: action to perform add/update/remove, of type enum pm_qos_req_action
|
||||
* @value: defines the qos request
|
||||
/**
|
||||
* apply_constraint - Add/modify/remove device PM QoS request.
|
||||
* @req: Constraint request to apply
|
||||
* @action: Action to perform (add/update/remove).
|
||||
* @value: Value to assign to the QoS request.
|
||||
*
|
||||
* Internal function to update the constraints list using the PM QoS core
|
||||
* code and if needed call the per-device and the global notification
|
||||
* callbacks
|
||||
*/
|
||||
static int apply_constraint(struct dev_pm_qos_request *req,
|
||||
enum pm_qos_req_action action, int value)
|
||||
enum pm_qos_req_action action, s32 value)
|
||||
{
|
||||
int ret, curr_value;
|
||||
struct dev_pm_qos *qos = req->dev->power.qos;
|
||||
int ret;
|
||||
|
||||
ret = pm_qos_update_target(req->dev->power.constraints,
|
||||
&req->node, action, value);
|
||||
|
||||
if (ret) {
|
||||
/* Call the global callbacks if needed */
|
||||
curr_value = pm_qos_read_value(req->dev->power.constraints);
|
||||
blocking_notifier_call_chain(&dev_pm_notifiers,
|
||||
(unsigned long)curr_value,
|
||||
req);
|
||||
switch(req->type) {
|
||||
case DEV_PM_QOS_LATENCY:
|
||||
ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
|
||||
action, value);
|
||||
if (ret) {
|
||||
value = pm_qos_read_value(&qos->latency);
|
||||
blocking_notifier_call_chain(&dev_pm_notifiers,
|
||||
(unsigned long)value,
|
||||
req);
|
||||
}
|
||||
break;
|
||||
case DEV_PM_QOS_FLAGS:
|
||||
ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
|
||||
action, value);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req,
|
||||
*/
|
||||
static int dev_pm_qos_constraints_allocate(struct device *dev)
|
||||
{
|
||||
struct dev_pm_qos *qos;
|
||||
struct pm_qos_constraints *c;
|
||||
struct blocking_notifier_head *n;
|
||||
|
||||
c = kzalloc(sizeof(*c), GFP_KERNEL);
|
||||
if (!c)
|
||||
qos = kzalloc(sizeof(*qos), GFP_KERNEL);
|
||||
if (!qos)
|
||||
return -ENOMEM;
|
||||
|
||||
n = kzalloc(sizeof(*n), GFP_KERNEL);
|
||||
if (!n) {
|
||||
kfree(c);
|
||||
kfree(qos);
|
||||
return -ENOMEM;
|
||||
}
|
||||
BLOCKING_INIT_NOTIFIER_HEAD(n);
|
||||
|
||||
c = &qos->latency;
|
||||
plist_head_init(&c->list);
|
||||
c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
|
||||
c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
|
||||
c->type = PM_QOS_MIN;
|
||||
c->notifiers = n;
|
||||
|
||||
INIT_LIST_HEAD(&qos->flags.list);
|
||||
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
dev->power.constraints = c;
|
||||
dev->power.qos = qos;
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
|
||||
return 0;
|
||||
@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
|
||||
void dev_pm_qos_constraints_init(struct device *dev)
|
||||
{
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
dev->power.constraints = NULL;
|
||||
dev->power.qos = NULL;
|
||||
dev->power.power_state = PMSG_ON;
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
}
|
||||
@ -164,6 +220,7 @@ void dev_pm_qos_constraints_init(struct device *dev)
|
||||
*/
|
||||
void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
{
|
||||
struct dev_pm_qos *qos;
|
||||
struct dev_pm_qos_request *req, *tmp;
|
||||
struct pm_qos_constraints *c;
|
||||
|
||||
@ -176,12 +233,13 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
dev->power.power_state = PMSG_INVALID;
|
||||
c = dev->power.constraints;
|
||||
if (!c)
|
||||
qos = dev->power.qos;
|
||||
if (!qos)
|
||||
goto out;
|
||||
|
||||
c = &qos->latency;
|
||||
/* Flush the constraints list for the device */
|
||||
plist_for_each_entry_safe(req, tmp, &c->list, node) {
|
||||
plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
|
||||
/*
|
||||
* Update constraints list and call the notification
|
||||
* callbacks if needed
|
||||
@ -191,11 +249,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
}
|
||||
|
||||
spin_lock_irq(&dev->power.lock);
|
||||
dev->power.constraints = NULL;
|
||||
dev->power.qos = NULL;
|
||||
spin_unlock_irq(&dev->power.lock);
|
||||
|
||||
kfree(c->notifiers);
|
||||
kfree(c);
|
||||
kfree(qos);
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
@ -205,6 +263,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
* dev_pm_qos_add_request - inserts new qos request into the list
|
||||
* @dev: target device for the constraint
|
||||
* @req: pointer to a preallocated handle
|
||||
* @type: type of the request
|
||||
* @value: defines the qos request
|
||||
*
|
||||
* This function inserts a new entry in the device constraints list of
|
||||
@ -218,9 +277,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
|
||||
* -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
|
||||
* to allocate for data structures, -ENODEV if the device has just been removed
|
||||
* from the system.
|
||||
*
|
||||
* Callers should ensure that the target device is not RPM_SUSPENDED before
|
||||
* using this function for requests of type DEV_PM_QOS_FLAGS.
|
||||
*/
|
||||
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
|
||||
s32 value)
|
||||
enum dev_pm_qos_req_type type, s32 value)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
@ -235,7 +297,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
if (!dev->power.constraints) {
|
||||
if (!dev->power.qos) {
|
||||
if (dev->power.power_state.event == PM_EVENT_INVALID) {
|
||||
/* The device has been removed from the system. */
|
||||
req->dev = NULL;
|
||||
@ -251,8 +313,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
if (!ret) {
|
||||
req->type = type;
|
||||
ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
@ -261,6 +325,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
|
||||
|
||||
/**
|
||||
* __dev_pm_qos_update_request - Modify an existing device PM QoS request.
|
||||
* @req : PM QoS request to modify.
|
||||
* @new_value: New value to request.
|
||||
*/
|
||||
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
|
||||
s32 new_value)
|
||||
{
|
||||
s32 curr_value;
|
||||
int ret = 0;
|
||||
|
||||
if (!req->dev->power.qos)
|
||||
return -ENODEV;
|
||||
|
||||
switch(req->type) {
|
||||
case DEV_PM_QOS_LATENCY:
|
||||
curr_value = req->data.pnode.prio;
|
||||
break;
|
||||
case DEV_PM_QOS_FLAGS:
|
||||
curr_value = req->data.flr.flags;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (curr_value != new_value)
|
||||
ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* dev_pm_qos_update_request - modifies an existing qos request
|
||||
* @req : handle to list element holding a dev_pm_qos request to use
|
||||
@ -275,11 +370,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
|
||||
* 0 if the aggregated constraint value has not changed,
|
||||
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
|
||||
* removed from the system
|
||||
*
|
||||
* Callers should ensure that the target device is not RPM_SUSPENDED before
|
||||
* using this function for requests of type DEV_PM_QOS_FLAGS.
|
||||
*/
|
||||
int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
|
||||
s32 new_value)
|
||||
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
if (!req) /*guard against callers passing in null */
|
||||
return -EINVAL;
|
||||
@ -289,17 +386,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
if (req->dev->power.constraints) {
|
||||
if (new_value != req->node.prio)
|
||||
ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
|
||||
new_value);
|
||||
} else {
|
||||
/* Return if the device has been removed */
|
||||
ret = -ENODEV;
|
||||
}
|
||||
|
||||
ret = __dev_pm_qos_update_request(req, new_value);
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
|
||||
@ -315,6 +404,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
|
||||
* 0 if the aggregated constraint value has not changed,
|
||||
* -EINVAL in case of wrong parameters, -ENODEV if the device has been
|
||||
* removed from the system
|
||||
*
|
||||
* Callers should ensure that the target device is not RPM_SUSPENDED before
|
||||
* using this function for requests of type DEV_PM_QOS_FLAGS.
|
||||
*/
|
||||
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
|
||||
{
|
||||
@ -329,7 +421,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
if (req->dev->power.constraints) {
|
||||
if (req->dev->power.qos) {
|
||||
ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
|
||||
PM_QOS_DEFAULT_VALUE);
|
||||
memset(req, 0, sizeof(*req));
|
||||
@ -362,13 +454,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
|
||||
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
if (!dev->power.constraints)
|
||||
if (!dev->power.qos)
|
||||
ret = dev->power.power_state.event != PM_EVENT_INVALID ?
|
||||
dev_pm_qos_constraints_allocate(dev) : -ENODEV;
|
||||
|
||||
if (!ret)
|
||||
ret = blocking_notifier_chain_register(
|
||||
dev->power.constraints->notifiers, notifier);
|
||||
dev->power.qos->latency.notifiers, notifier);
|
||||
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
return ret;
|
||||
@ -393,9 +485,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
/* Silently return if the constraints object is not present. */
|
||||
if (dev->power.constraints)
|
||||
if (dev->power.qos)
|
||||
retval = blocking_notifier_chain_unregister(
|
||||
dev->power.constraints->notifiers,
|
||||
dev->power.qos->latency.notifiers,
|
||||
notifier);
|
||||
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
@ -449,7 +541,8 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
|
||||
ancestor = ancestor->parent;
|
||||
|
||||
if (ancestor)
|
||||
error = dev_pm_qos_add_request(ancestor, req, value);
|
||||
error = dev_pm_qos_add_request(ancestor, req,
|
||||
DEV_PM_QOS_LATENCY, value);
|
||||
|
||||
if (error)
|
||||
req->dev = NULL;
|
||||
@ -459,10 +552,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
|
||||
|
||||
#ifdef CONFIG_PM_RUNTIME
|
||||
static void __dev_pm_qos_drop_user_request(struct device *dev)
|
||||
static void __dev_pm_qos_drop_user_request(struct device *dev,
|
||||
enum dev_pm_qos_req_type type)
|
||||
{
|
||||
dev_pm_qos_remove_request(dev->power.pq_req);
|
||||
dev->power.pq_req = NULL;
|
||||
switch(type) {
|
||||
case DEV_PM_QOS_LATENCY:
|
||||
dev_pm_qos_remove_request(dev->power.qos->latency_req);
|
||||
dev->power.qos->latency_req = NULL;
|
||||
break;
|
||||
case DEV_PM_QOS_FLAGS:
|
||||
dev_pm_qos_remove_request(dev->power.qos->flags_req);
|
||||
dev->power.qos->flags_req = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -478,21 +580,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
|
||||
if (!device_is_registered(dev) || value < 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (dev->power.pq_req)
|
||||
if (dev->power.qos && dev->power.qos->latency_req)
|
||||
return -EEXIST;
|
||||
|
||||
req = kzalloc(sizeof(*req), GFP_KERNEL);
|
||||
if (!req)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = dev_pm_qos_add_request(dev, req, value);
|
||||
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
dev->power.pq_req = req;
|
||||
ret = pm_qos_sysfs_add(dev);
|
||||
dev->power.qos->latency_req = req;
|
||||
ret = pm_qos_sysfs_add_latency(dev);
|
||||
if (ret)
|
||||
__dev_pm_qos_drop_user_request(dev);
|
||||
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -504,10 +606,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
|
||||
*/
|
||||
void dev_pm_qos_hide_latency_limit(struct device *dev)
|
||||
{
|
||||
if (dev->power.pq_req) {
|
||||
pm_qos_sysfs_remove(dev);
|
||||
__dev_pm_qos_drop_user_request(dev);
|
||||
if (dev->power.qos && dev->power.qos->latency_req) {
|
||||
pm_qos_sysfs_remove_latency(dev);
|
||||
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
|
||||
* @dev: Device whose PM QoS flags are to be exposed to user space.
|
||||
* @val: Initial values of the flags.
|
||||
*/
|
||||
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
|
||||
{
|
||||
struct dev_pm_qos_request *req;
|
||||
int ret;
|
||||
|
||||
if (!device_is_registered(dev))
|
||||
return -EINVAL;
|
||||
|
||||
if (dev->power.qos && dev->power.qos->flags_req)
|
||||
return -EEXIST;
|
||||
|
||||
req = kzalloc(sizeof(*req), GFP_KERNEL);
|
||||
if (!req)
|
||||
return -ENOMEM;
|
||||
|
||||
pm_runtime_get_sync(dev);
|
||||
ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
dev->power.qos->flags_req = req;
|
||||
ret = pm_qos_sysfs_add_flags(dev);
|
||||
if (ret)
|
||||
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
|
||||
|
||||
fail:
|
||||
pm_runtime_put(dev);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
|
||||
* @dev: Device whose PM QoS flags are to be hidden from user space.
|
||||
*/
|
||||
void dev_pm_qos_hide_flags(struct device *dev)
|
||||
{
|
||||
if (dev->power.qos && dev->power.qos->flags_req) {
|
||||
pm_qos_sysfs_remove_flags(dev);
|
||||
pm_runtime_get_sync(dev);
|
||||
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
|
||||
pm_runtime_put(dev);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
|
||||
|
||||
/**
|
||||
* dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
|
||||
* @dev: Device to update the PM QoS flags request for.
|
||||
* @mask: Flags to set/clear.
|
||||
* @set: Whether to set or clear the flags (true means set).
|
||||
*/
|
||||
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
|
||||
{
|
||||
s32 value;
|
||||
int ret;
|
||||
|
||||
if (!dev->power.qos || !dev->power.qos->flags_req)
|
||||
return -EINVAL;
|
||||
|
||||
pm_runtime_get_sync(dev);
|
||||
mutex_lock(&dev_pm_qos_mtx);
|
||||
|
||||
value = dev_pm_qos_requested_flags(dev);
|
||||
if (set)
|
||||
value |= mask;
|
||||
else
|
||||
value &= ~mask;
|
||||
|
||||
ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
|
||||
|
||||
mutex_unlock(&dev_pm_qos_mtx);
|
||||
pm_runtime_put(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_PM_RUNTIME */
|
||||
|
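Taken together, the flags request can also be exposed to and driven from the kernel side. A hedged sketch (the setup function is invented; dev_pm_qos_expose_flags() and dev_pm_qos_update_flags() are added above):

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Hypothetical bus/driver code exposing the PM QoS flags to user space at
 * registration time and flipping one of them from the kernel side. */
static int my_setup_pm_qos(struct device *dev)
{
	int ret;

	/* Creates the pm_qos_no_power_off / pm_qos_remote_wakeup attributes. */
	ret = dev_pm_qos_expose_flags(dev, 0);
	if (ret)
		return ret;

	/* Later, the kernel itself may update the user-space-owned request: */
	return dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, true);
}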
@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
static ssize_t pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
}

static ssize_t pm_qos_latency_store(struct device *dev,
@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev,
if (value < 0)
return -EINVAL;

ret = dev_pm_qos_update_request(dev->power.pq_req, value);
ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
return ret < 0 ? ret : n;
}

static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
pm_qos_latency_show, pm_qos_latency_store);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
& PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
int ret;

if (kstrtoint(buf, 0, &ret))
return -EINVAL;

if (ret != 0 && ret != 1)
return -EINVAL;

ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
return ret < 0 ? ret : n;
}

static DEVICE_ATTR(pm_qos_no_power_off, 0644,
pm_qos_no_power_off_show, pm_qos_no_power_off_store);

static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
& PM_QOS_FLAG_REMOTE_WAKEUP));
}

static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t n)
{
int ret;

if (kstrtoint(buf, 0, &ret))
return -EINVAL;

if (ret != 0 && ret != 1)
return -EINVAL;

ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
return ret < 0 ? ret : n;
}

static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP
@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};

static struct attribute *pm_qos_attrs[] = {
static struct attribute *pm_qos_latency_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_resume_latency_us.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_qos_attr_group = {
static struct attribute_group pm_qos_latency_attr_group = {
.name = power_group_name,
.attrs = pm_qos_attrs,
.attrs = pm_qos_latency_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
#ifdef CONFIG_PM_RUNTIME
&dev_attr_pm_qos_no_power_off.attr,
&dev_attr_pm_qos_remote_wakeup.attr,
#endif /* CONFIG_PM_RUNTIME */
NULL,
};
static struct attribute_group pm_qos_flags_attr_group = {
.name = power_group_name,
.attrs = pm_qos_flags_attrs,
};

int dpm_sysfs_add(struct device *dev)
@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}

int pm_qos_sysfs_add(struct device *dev)
int pm_qos_sysfs_add_latency(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
}

void pm_qos_sysfs_remove(struct device *dev)
void pm_qos_sysfs_remove_latency(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
@ -727,7 +727,9 @@ static void flctl_select_chip(struct mtd_info *mtd, int chipnr)

if (!flctl->qos_request) {
ret = dev_pm_qos_add_request(&flctl->pdev->dev,
&flctl->pm_qos, 100);
&flctl->pm_qos,
DEV_PM_QOS_LATENCY,
100);
if (ret < 0)
dev_err(&flctl->pdev->dev,
"PM QoS request failed: %d\n", ret);
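The sh_flctl hunk above is the in-tree caller that has to be adapted to the widened dev_pm_qos_add_request() signature. A hedged sketch of the new calling convention for any other driver; the foo_* names and the 100 microsecond bound are placeholders:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_latency_req;

static int foo_add_latency_bound(struct device *dev)
{
        /* Latency requests now pass DEV_PM_QOS_LATENCY explicitly;
         * flags requests would pass DEV_PM_QOS_FLAGS instead. */
        return dev_pm_qos_add_request(dev, &foo_latency_req,
                                      DEV_PM_QOS_LATENCY, 100);
}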
@ -17,10 +17,9 @@

#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

static DEFINE_MUTEX(pci_acpi_pm_notify_mtx);

/**
* pci_acpi_wake_bus - Wake-up notification handler for root buses.
* @handle: ACPI handle of a device the notification is for.
@ -67,67 +66,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
* add_pm_notifier - Register PM notifier for given ACPI device.
* @dev: ACPI device to add the notifier for.
* @context: PCI device or bus to check for PME status if an event is signaled.
*
* NOTE: @dev need not be a run-wake or wake-up device to be a valid source of
* PM wake-up events. For example, wake-up events may be generated for bridges
* if one of the devices below the bridge is signaling PME, even if the bridge
* itself doesn't have a wake-up GPE associated with it.
*/
static acpi_status add_pm_notifier(struct acpi_device *dev,
acpi_notify_handler handler,
void *context)
{
acpi_status status = AE_ALREADY_EXISTS;

mutex_lock(&pci_acpi_pm_notify_mtx);

if (dev->wakeup.flags.notifier_present)
goto out;

status = acpi_install_notify_handler(dev->handle,
ACPI_SYSTEM_NOTIFY,
handler, context);
if (ACPI_FAILURE(status))
goto out;

dev->wakeup.flags.notifier_present = true;

out:
mutex_unlock(&pci_acpi_pm_notify_mtx);
return status;
}

/**
* remove_pm_notifier - Unregister PM notifier from given ACPI device.
* @dev: ACPI device to remove the notifier from.
*/
static acpi_status remove_pm_notifier(struct acpi_device *dev,
acpi_notify_handler handler)
{
acpi_status status = AE_BAD_PARAMETER;

mutex_lock(&pci_acpi_pm_notify_mtx);

if (!dev->wakeup.flags.notifier_present)
goto out;

status = acpi_remove_notify_handler(dev->handle,
ACPI_SYSTEM_NOTIFY,
handler);
if (ACPI_FAILURE(status))
goto out;

dev->wakeup.flags.notifier_present = false;

out:
mutex_unlock(&pci_acpi_pm_notify_mtx);
return status;
}

/**
* pci_acpi_add_bus_pm_notifier - Register PM notifier for given PCI bus.
* @dev: ACPI device to add the notifier for.
@ -136,7 +74,7 @@ static acpi_status remove_pm_notifier(struct acpi_device *dev,
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
struct pci_bus *pci_bus)
{
return add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
return acpi_add_pm_notifier(dev, pci_acpi_wake_bus, pci_bus);
}

/**
@ -145,7 +83,7 @@ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
{
return remove_pm_notifier(dev, pci_acpi_wake_bus);
return acpi_remove_pm_notifier(dev, pci_acpi_wake_bus);
}

/**
@ -156,7 +94,7 @@ acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev)
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev)
{
return add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
return acpi_add_pm_notifier(dev, pci_acpi_wake_dev, pci_dev);
}

/**
@ -165,7 +103,7 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
*/
acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
{
return remove_pm_notifier(dev, pci_acpi_wake_dev);
return acpi_remove_pm_notifier(dev, pci_acpi_wake_dev);
}

phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
@ -257,11 +195,16 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return -ENODEV;

switch (state) {
case PCI_D3cold:
if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
PM_QOS_FLAGS_ALL) {
error = -EBUSY;
break;
}
case PCI_D0:
case PCI_D1:
case PCI_D2:
case PCI_D3hot:
case PCI_D3cold:
error = acpi_bus_set_power(handle, state_conv[state]);
}

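The new PCI_D3cold case is the first consumer of the aggregated flag: with a single-bit mask, PM_QOS_FLAGS_ALL means the bit is set in the device's aggregated flags, so any active "no power off" request vetoes the transition. A sketch of the same check for a hypothetical non-PCI bus (the function name is illustrative, not from the patch):

#include <linux/pm_qos.h>

static bool foo_bus_may_power_off(struct device *dev)
{
        /* Refuse to remove power whenever at least one PM QoS request
         * has PM_QOS_FLAG_NO_POWER_OFF set for this device. */
        return dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) !=
                PM_QOS_FLAGS_ALL;
}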
@ -201,6 +201,7 @@ struct acpi_device_power_flags {
struct acpi_device_power_state {
struct {
u8 valid:1;
u8 os_accessible:1;
u8 explicit_set:1; /* _PSx present? */
u8 reserved:6;
} flags;
@ -339,6 +340,7 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
int acpi_bus_set_power(acpi_handle handle, int state);
int acpi_device_set_power(struct acpi_device *device, int state);
int acpi_bus_update_power(acpi_handle handle, int *state_p);
bool acpi_bus_power_manageable(acpi_handle handle);
bool acpi_bus_can_wakeup(acpi_handle handle);
@ -416,21 +418,64 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);

#ifdef CONFIG_PM
acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler, void *context);
acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler);
int acpi_device_power_state(struct device *dev, struct acpi_device *adev,
u32 target_state, int d_max_in, int *d_min_p);
int acpi_pm_device_sleep_state(struct device *, int *, int);
#else
static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
static inline acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler,
void *context)
{
return AE_SUPPORT;
}
static inline acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
acpi_notify_handler handler)
{
return AE_SUPPORT;
}
static inline int __acpi_device_power_state(int m, int *p)
{
if (p)
*p = ACPI_STATE_D0;
return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0;
}
static inline int acpi_device_power_state(struct device *dev,
struct acpi_device *adev,
u32 target_state, int d_max_in,
int *d_min_p)
{
return __acpi_device_power_state(d_max_in, d_min_p);
}
static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
{
return __acpi_device_power_state(m, p);
}
#endif

#ifdef CONFIG_PM_RUNTIME
int __acpi_device_run_wake(struct acpi_device *, bool);
int acpi_pm_device_run_wake(struct device *, bool);
#else
static inline int __acpi_device_run_wake(struct acpi_device *adev, bool en)
{
return -ENODEV;
}
static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
{
return -ENODEV;
}
#endif

#ifdef CONFIG_PM_SLEEP
int acpi_pm_device_run_wake(struct device *, bool);
int __acpi_device_sleep_wake(struct acpi_device *, u32, bool);
int acpi_pm_device_sleep_wake(struct device *, bool);
#else
static inline int acpi_pm_device_run_wake(struct device *dev, bool enable)
static inline int __acpi_device_sleep_wake(struct acpi_device *adev,
u32 target_state, bool enable)
{
return -ENODEV;
}
@ -440,6 +485,27 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
}
#endif

#ifdef CONFIG_ACPI_SLEEP
u32 acpi_target_system_state(void);
#else
static inline u32 acpi_target_system_state(void) { return ACPI_STATE_S0; }
#endif

static inline bool acpi_device_power_manageable(struct acpi_device *adev)
{
return adev->flags.power_manageable;
}

static inline bool acpi_device_can_wakeup(struct acpi_device *adev)
{
return adev->wakeup.flags.valid;
}

static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
{
return adev->power.states[ACPI_STATE_D3_COLD].flags.os_accessible;
}

#else /* CONFIG_ACPI */

static inline int register_acpi_bus_type(void *bus) { return 0; }
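acpi_add_pm_notifier() and acpi_remove_pm_notifier(), declared above, generalize what pci-acpi.c used to do privately. A hedged sketch of how other bus glue might use them; the foo_* handler and names are hypothetical, and the handler is installed as an ACPI_SYSTEM_NOTIFY handler just as in the PCI code:

#include <linux/acpi.h>
#include <linux/pm_runtime.h>

static void foo_wake_notify(acpi_handle handle, u32 event, void *context)
{
        struct device *dev = context;

        if (event == ACPI_NOTIFY_DEVICE_WAKE)
                pm_request_resume(dev);
}

static acpi_status foo_register_wakeup(struct acpi_device *adev,
                                       struct device *dev)
{
        return acpi_add_pm_notifier(adev, foo_wake_notify, dev);
}

static acpi_status foo_unregister_wakeup(struct acpi_device *adev)
{
        return acpi_remove_pm_notifier(adev, foo_wake_notify);
}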
@ -25,6 +25,7 @@
#ifndef _LINUX_ACPI_H
#define _LINUX_ACPI_H

#include <linux/errno.h>
#include <linux/ioport.h> /* for struct resource */
#include <linux/device.h>

@ -478,4 +479,41 @@ acpi_status acpi_os_prepare_sleep(u8 sleep_state,
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME)
int acpi_dev_runtime_suspend(struct device *dev);
int acpi_dev_runtime_resume(struct device *dev);
int acpi_subsys_runtime_suspend(struct device *dev);
int acpi_subsys_runtime_resume(struct device *dev);
#else
static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
#endif

#ifdef CONFIG_ACPI_SLEEP
int acpi_dev_suspend_late(struct device *dev);
int acpi_dev_resume_early(struct device *dev);
int acpi_subsys_prepare(struct device *dev);
int acpi_subsys_suspend_late(struct device *dev);
int acpi_subsys_resume_early(struct device *dev);
#else
static inline int acpi_dev_suspend_late(struct device *dev) { return 0; }
static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
#endif

#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
int acpi_dev_pm_attach(struct device *dev, bool power_on);
void acpi_dev_pm_detach(struct device *dev, bool power_off);
#else
static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
{
return -ENODEV;
}
static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
#endif

#endif /*_LINUX_ACPI_H*/
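acpi_dev_pm_attach() and acpi_dev_pm_detach() let bus glue hand a device over to ACPI power management. A minimal sketch, assuming a hypothetical platform driver; names and error handling are illustrative only:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int foo_platform_probe(struct platform_device *pdev)
{
        int ret;

        /* power_on == true asks the ACPI layer to bring the device to D0
         * as part of attaching it. */
        ret = acpi_dev_pm_attach(&pdev->dev, true);
        if (ret)
                return ret;

        /* ... regular probe work ... */
        return 0;
}

static int foo_platform_remove(struct platform_device *pdev)
{
        /* power_off == true powers the device down when detaching. */
        acpi_dev_pm_detach(&pdev->dev, true);
        return 0;
}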
@ -546,10 +546,9 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
struct dev_pm_qos_request *pq_req;
#endif
struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */
struct pm_qos_constraints *constraints;
struct dev_pm_qos *qos;
};

extern void update_pm_runtime_accounting(struct device *dev);
@ -20,6 +20,13 @@ enum {
PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
PM_QOS_FLAGS_UNDEFINED = -1,
PM_QOS_FLAGS_NONE,
PM_QOS_FLAGS_SOME,
PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
@ -27,14 +34,31 @@ enum {
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE 0

#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)

struct pm_qos_request {
struct plist_node node;
int pm_qos_class;
struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
struct list_head node;
s32 flags; /* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
DEV_PM_QOS_LATENCY = 1,
DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
struct plist_node node;
enum dev_pm_qos_req_type type;
union {
struct plist_node pnode;
struct pm_qos_flags_request flr;
} data;
struct device *dev;
};

@ -45,8 +69,8 @@ enum pm_qos_type {
};

/*
* Note: The lockless read path depends on the CPU accessing
* target_value atomically. Atomic access is only guaranteed on all CPU
* Note: The lockless read path depends on the CPU accessing target_value
* or effective_flags atomically. Atomic access is only guaranteed on all CPU
* types Linux supports for 32-bit quantities
*/
struct pm_qos_constraints {
@ -57,6 +81,18 @@ struct pm_qos_constraints {
struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
struct list_head list;
s32 effective_flags; /* Do not change to 64 bit */
};

struct dev_pm_qos {
struct pm_qos_constraints latency;
struct pm_qos_flags flags;
struct dev_pm_qos_request *latency_req;
struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
PM_QOS_ADD_REQ, /* Add a new request */
@ -71,6 +107,9 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
@ -86,10 +125,12 @@ int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
s32 value);
enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
@ -103,12 +144,19 @@ void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
s32 mask)
{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
struct dev_pm_qos_request *req,
enum dev_pm_qos_req_type type,
s32 value)
{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
@ -144,10 +192,31 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);

static inline s32 dev_pm_qos_requested_latency(struct device *dev)
{
return dev->power.qos->latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
return dev->power.qos->flags_req->data.flr.flags;
}
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
{ return 0; }

static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif

#endif
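The reworked dev_pm_qos_request keeps its payload in a union selected by the new type field, which is what the dev_pm_qos_requested_latency() and dev_pm_qos_requested_flags() helpers above rely on. A purely illustrative sketch (drivers normally go through the dev_pm_qos_* API rather than touching the union directly):

#include <linux/pm_qos.h>

static s32 foo_request_value(struct dev_pm_qos_request *req)
{
        switch (req->type) {
        case DEV_PM_QOS_LATENCY:
                return req->data.pnode.prio;    /* latency bound */
        case DEV_PM_QOS_FLAGS:
                return req->data.flr.flags;     /* PM_QOS_FLAG_* bits */
        }
        return 0;
}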
@ -212,6 +212,69 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
}
}

/**
* pm_qos_flags_remove_req - Remove device PM QoS flags request.
* @pqf: Device PM QoS flags set to remove the request from.
* @req: Request to remove from the set.
*/
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req)
{
s32 val = 0;

list_del(&req->node);
list_for_each_entry(req, &pqf->list, node)
val |= req->flags;

pqf->effective_flags = val;
}

/**
* pm_qos_update_flags - Update a set of PM QoS flags.
* @pqf: Set of flags to update.
* @req: Request to add to the set, to modify, or to remove from the set.
* @action: Action to take on the set.
* @val: Value of the request to add or modify.
*
* Update the given set of PM QoS flags and call notifiers if the aggregate
* value has changed. Returns 1 if the aggregate constraint value has changed,
* 0 otherwise.
*/
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val)
{
unsigned long irqflags;
s32 prev_value, curr_value;

spin_lock_irqsave(&pm_qos_lock, irqflags);

prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

switch (action) {
case PM_QOS_REMOVE_REQ:
pm_qos_flags_remove_req(pqf, req);
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
list_add_tail(&req->node, &pqf->list);
pqf->effective_flags |= val;
break;
default:
/* no action */
;
}

curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

spin_unlock_irqrestore(&pm_qos_lock, irqflags);

return prev_value != curr_value;
}
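A short worked example of the aggregation implemented by pm_qos_update_flags() and pm_qos_flags_remove_req() above; the set and requests are hypothetical locals, not taken from the patch:

#include <linux/pm_qos.h>

static void foo_flags_demo(void)
{
        struct pm_qos_flags set = { .list = LIST_HEAD_INIT(set.list) };
        struct pm_qos_flags_request req_a, req_b;

        pm_qos_update_flags(&set, &req_a, PM_QOS_ADD_REQ,
                            PM_QOS_FLAG_NO_POWER_OFF);
        pm_qos_update_flags(&set, &req_b, PM_QOS_ADD_REQ,
                            PM_QOS_FLAG_REMOTE_WAKEUP);
        /* effective_flags is the bitwise OR of all requests:
         * PM_QOS_FLAG_NO_POWER_OFF | PM_QOS_FLAG_REMOTE_WAKEUP. */

        pm_qos_update_flags(&set, &req_a, PM_QOS_REMOVE_REQ, 0);
        /* The list is re-walked on removal, so effective_flags drops back
         * to PM_QOS_FLAG_REMOTE_WAKEUP and the call above returned true,
         * because the aggregate changed. */
}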
/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested