/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>

#include "sched.h"

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	unsigned int next_freq;
	unsigned int cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/
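
/*
 * Summary of the check below: this is the governor's rate limiter. It allows
 * a frequency change at most once every freq_update_delay_ns (derived from
 * the rate_limit_us tunable), except when a limits update has set
 * need_freq_update, in which case the next evaluation is forced through.
 */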
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

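/*
 * Note on the commit path below: if the driver supports fast switching, the
 * frequency is changed directly from the scheduler update path. Otherwise the
 * request is handed off via irq_work to the kthread created in
 * sugov_kthread_create(), which applies it with __cpufreq_driver_target()
 * from process context under work_lock.
 */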
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		if (sg_policy->next_freq == next_freq) {
			trace_cpu_frequency(policy->cur, smp_processor_id());
			return;
		}
		sg_policy->next_freq = next_freq;
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else if (sg_policy->next_freq != next_freq) {
		sg_policy->next_freq = next_freq;
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal or greater than the raw
 * next_freq (as calculated above) is returned, subject to policy min/max and
 * cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;
	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

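/*
 * Worked example for get_next_freq() (hypothetical numbers, for illustration
 * only): with frequency-invariant utilization, max_freq = 2000000 kHz,
 * util = 512 and max = 1024, the raw value is
 * (2000000 + 500000) * 512 / 1024 = 1250000 kHz, i.e. 1.25 * max_freq *
 * util / max, which cpufreq_driver_resolve_freq() then maps to the lowest
 * supported frequency at or above that value.
 */
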
static void sugov_get_util(unsigned long *util, unsigned long *max)
{
	struct rq *rq = this_rq();
	unsigned long cfs_max;

	cfs_max = arch_scale_cpu_capacity(NULL, smp_processor_id());

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;
}

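/*
 * The two helpers below implement the iowait boost: a wakeup from I/O wait
 * (SCHED_CPUFREQ_IOWAIT) raises the boost to iowait_boost_max, each use of
 * the boost halves it, and a gap longer than a tick without updates clears
 * it, so the boost decays quickly once the I/O-bound activity stops.
 */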
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}

static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max);
		sugov_iowait_boost(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
	}
	sugov_update_commit(sg_policy, time, next_f);
}

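/*
 * For a shared policy the frequency request is driven by the CPU with the
 * highest util/max ratio. The comparison j_util * max > j_max * util below is
 * the cross-multiplied form of j_util / j_max > util / max, avoiding divisions
 * in this hot path. Hypothetical example: util = 300, max = 1024 on the local
 * CPU loses to j_util = 500, j_max = 1024 on a sibling, so the sibling's
 * values are fed to get_next_freq().
 */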
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
					   unsigned long util, unsigned long max,
					   unsigned int flags)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int max_f = policy->cpuinfo.max_freq;
	u64 last_freq_update_time = sg_policy->last_freq_update_time;
	unsigned int j;

	if (flags & SCHED_CPUFREQ_RT_DL)
		return max_f;

	sugov_iowait_boost(sg_cpu, &util, &max);

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu;
		unsigned long j_util, j_max;
		s64 delta_ns;

		if (j == smp_processor_id())
			continue;

		j_sg_cpu = &per_cpu(sugov_cpu, j);
		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return max_f;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max;
	unsigned int next_f;

	sugov_get_util(&util, &max);

	raw_spin_lock(&sg_policy->update_lock);

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

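/*
 * Usage sketch (the paths below follow the usual cpufreq sysfs layout and are
 * not defined in this file): with per-policy tunables the attribute shows up
 * as /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us, and
 * globally as /sys/devices/system/cpu/cpufreq/schedutil/rate_limit_us.
 * For example, "echo 2000 > .../rate_limit_us" sets a 2 ms minimum interval
 * between frequency updates for every policy attached to these tunables.
 */
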
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	unsigned int lat;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = LATENCY_MULTIPLIER;
	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (lat)
		tunables->rate_limit_us *= lat;
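
	/*
	 * Hypothetical example of the default above: with LATENCY_MULTIPLIER
	 * of 1000 (its usual cpufreq.h value) and a driver transition latency
	 * of 10000 ns, lat == 10 and the default rate_limit_us becomes 10000,
	 * i.e. 10 ms between frequency updates.
	 */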

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);

free_sg_policy:
	mutex_unlock(&global_tunables_lock);

	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		sg_cpu->sg_policy = sg_policy;
		if (policy_is_shared(policy)) {
			sg_cpu->util = 0;
			sg_cpu->max = 0;
			sg_cpu->flags = SCHED_CPUFREQ_RT;
			sg_cpu->last_update = 0;
			sg_cpu->iowait_boost = 0;
			sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
						     sugov_update_shared);
		} else {
			cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
						     sugov_update_single);
		}
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);