/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)
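
/*
 * Illustrative sketch of how the two placement features above are
 * consumed, loosely modeled on place_entity() in fair.c (simplified,
 * not the verbatim kernel code): START_DEBIT pushes a new task's
 * vruntime one vslice ahead, while GENTLE_FAIR_SLEEPERS halves the
 * credit a waking sleeper receives. Compiled out with #if 0.
 */
#if 0	/* sketch only */
static void place_entity_sketch(struct cfs_rq *cfs_rq,
				struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/* New tasks start one vslice behind: they must earn their first slice. */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/* Only refund half of the sleeper's service deficit. */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* Never let a task gain time by being placed backwards. */
	se->vruntime = max_vruntime(se->vruntime, vruntime);
}
#endif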
/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)
/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
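
/*
 * Sketch of how the two buddy hints above are consumed (simplified
 * from pick_next_entity() in fair.c, not verbatim): NEXT_BUDDY marks
 * a wakee that failed to preempt, LAST_BUDDY marks the task we
 * preempted, and the pick path prefers either buddy as long as that
 * is not too unfair to the leftmost entity. Compiled out with #if 0.
 */
#if 0	/* sketch only */
static struct sched_entity *pick_next_entity_sketch(struct cfs_rq *cfs_rq)
{
	struct sched_entity *left = __pick_first_entity(cfs_rq);
	struct sched_entity *se = left;

	/* Prefer the last buddy: return the CPU to the preempted task. */
	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
		se = cfs_rq->last;

	/* The next buddy trumps it: someone really wants this to run. */
	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
		se = cfs_rq->next;

	return se;
}
#endif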
/*
 * Consider buddies to be cache hot; this decreases the likelihood
 * of a cache buddy being migrated away, increasing cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
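
/*
 * Sketch of the load-balancer side (simplified from task_hot() in
 * fair.c, not verbatim): a task that is currently a next/last buddy
 * is reported as cache hot, so migration code leaves it alone.
 * Compiled out with #if 0.
 */
#if 0	/* sketch only */
static int task_hot_sketch(struct task_struct *p, struct rq *rq)
{
	struct sched_entity *se = &p->se;

	if (sched_feat(CACHE_HOT_BUDDY) && rq->nr_running &&
	    (se == cfs_rq_of(se)->next || se == cfs_rq_of(se)->last))
		return 1;

	return 0;	/* fall through to the usual runtime-based check */
}
#endif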
/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
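
/*
 * Sketch of the gate (simplified from check_preempt_wakeup() in
 * fair.c, not verbatim): with the feature off, a waking task never
 * preempts current; preemption is then driven by the tick alone.
 * Compiled out with #if 0.
 */
#if 0	/* sketch only */
static void check_preempt_wakeup_sketch(struct rq *rq, struct task_struct *p)
{
	if (!sched_feat(WAKEUP_PREEMPTION))
		return;		/* wait for the next tick instead */

	/* ... vruntime comparison and resched_task() follow here ... */
}
#endif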
/*
 * Use arch-dependent CPU power functions.
 */
SCHED_FEAT(ARCH_POWER, true)
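
/*
 * Sketch of the consumer (simplified from update_cpu_power() in
 * fair.c, not verbatim): with the feature on, the architecture's
 * scaling hook is used instead of the generic default. Compiled out
 * with #if 0.
 */
#if 0	/* sketch only */
static void update_cpu_power_sketch(struct sched_domain *sd, int cpu)
{
	unsigned long power = SCHED_POWER_SCALE;

	if (sched_feat(ARCH_POWER))
		power *= arch_scale_freq_power(sd, cpu);
	else
		power *= default_scale_freq_power(sd, cpu);

	power >>= SCHED_POWER_SHIFT;
}
#endif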
SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)
/*
* Decrement CPU power based on time not spent running tasks
*/
SCHED_FEAT(NONTASK_POWER, true)
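
/*
 * Sketch of the idea (simplified, hypothetical helper, not verbatim
 * kernel code): scale a CPU's power down by the fraction of wall time
 * lost to non-task work such as RT, IRQ and steal time, so the load
 * balancer expects less fair-class throughput from that CPU. Compiled
 * out with #if 0.
 */
#if 0	/* sketch only */
static unsigned long scale_nontask_power_sketch(unsigned long power,
						u64 total, u64 nontask)
{
	u64 available = total - nontask;

	if (sched_feat(NONTASK_POWER))
		power = div_u64(power * available, total);

	return power;
}
#endif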
/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
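
/*
 * Sketch of the wakeup path (simplified from ttwu_queue() in core.c,
 * not verbatim): for a remote CPU that does not share a cache, queue
 * the wakeup and kick it with the scheduler IPI instead of grabbing
 * the remote rq->lock ourselves. Compiled out with #if 0.
 */
#if 0	/* sketch only */
static void ttwu_queue_sketch(struct task_struct *p, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
		ttwu_queue_remote(p, cpu);	/* IPI; no remote lock taken */
		return;
	}

	raw_spin_lock(&rq->lock);
	ttwu_do_activate(rq, p, 0);
	raw_spin_unlock(&rq->lock);
}
#endif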
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * numa_balancing=
*/
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
/*
 * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
 * higher number of hinting faults are recorded during active load
 * balancing.
 */
SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)
/*
 * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
 * lower number of hinting faults have been recorded. As this has
 * the potential to prevent a task from ever migrating to a new node
 * due to CPU overload, it is disabled by default.
 */
SCHED_FEAT(NUMA_RESIST_LOWER, false)
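
/*
 * Sketch of how the two NUMA hints above are consumed during load
 * balancing (simplified from migrate_improves_locality() and
 * migrate_degrades_locality() in fair.c, not verbatim): compare the
 * per-node hinting-fault counts for the source and destination nodes
 * of a proposed migration. Compiled out with #if 0.
 */
#if 0	/* sketch only */
static bool migrate_improves_locality_sketch(struct task_struct *p,
					     int src_nid, int dst_nid)
{
	if (!sched_feat(NUMA_FAVOUR_HIGHER) || src_nid == dst_nid)
		return false;

	/* Destination node recorded more hinting faults: move there. */
	return task_faults(p, dst_nid) > task_faults(p, src_nid);
}

static bool migrate_degrades_locality_sketch(struct task_struct *p,
					     int src_nid, int dst_nid)
{
	if (!sched_feat(NUMA_RESIST_LOWER) || src_nid == dst_nid)
		return false;

	/* Destination node recorded fewer hinting faults: resist the move. */
	return task_faults(p, dst_nid) < task_faults(p, src_nid);
}
#endif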
#endif