/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)

/*
* Place new tasks ahead so that they do not starve already running
* tasks
*/
SCHED_FEAT(START_DEBIT, true)
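
/*
 * A minimal userspace sketch, not the kernel's place_entity(), of how
 * entity placement can consult the two features above. Every name here
 * (the *_sketch helpers, the feature booleans, the parameters) is an
 * illustrative assumption; the real fair.c code also handles vruntime
 * wraparound, which this simplification ignores.
 */
#include <stdbool.h>
#include <stdint.h>

struct sched_entity_sketch {
	uint64_t vruntime;
};

static bool feat_gentle_fair_sleepers = true;
static bool feat_start_debit = true;

static uint64_t max_u64(uint64_t a, uint64_t b)
{
	return a > b ? a : b;
}

/*
 * Place an entity relative to the queue's min_vruntime: 'initial' for a
 * fork, otherwise a wakeup. 'vslice' is the entity's weighted slice and
 * 'latency_ns' the sleeper credit ceiling, both assumed precomputed.
 */
static void place_entity_sketch(struct sched_entity_sketch *se,
				uint64_t min_vruntime, uint64_t vslice,
				uint64_t latency_ns, bool initial)
{
	uint64_t vruntime = min_vruntime;

	/* START_DEBIT: new tasks start one slice in the future, so a
	 * fork storm cannot starve tasks that are already running. */
	if (initial && feat_start_debit)
		vruntime += vslice;

	if (!initial) {
		uint64_t thresh = latency_ns;

		/* GENTLE_FAIR_SLEEPERS: credit only half of the sleeper
		 * deficit, so sleepers run sooner without letting many
		 * of them rip the vruntime spread apart. */
		if (feat_gentle_fair_sleepers)
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* Placement never moves an entity backwards in virtual time. */
	se->vruntime = max_u64(se->vruntime, vruntime);
}
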
/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as that will likely touch the same data;
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
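
/*
 * A simplified sketch, not the kernel's pick_next_entity(), of how the
 * NEXT_BUDDY and LAST_BUDDY hints can bias selection away from the
 * leftmost (most eligible) entity. The struct, helper names, and the
 * single-granularity fairness bound are assumptions for illustration.
 */
#include <stddef.h>
#include <stdint.h>

struct entity_sketch {
	uint64_t vruntime;
};

/* Running 'buddy' instead of 'left' is acceptable while the buddy does
 * not lag the leftmost entity by more than one wakeup granularity. */
static int buddy_is_fair(const struct entity_sketch *buddy,
			 const struct entity_sketch *left,
			 uint64_t wakeup_gran)
{
	return (int64_t)(buddy->vruntime - left->vruntime) <=
	       (int64_t)wakeup_gran;
}

static struct entity_sketch *
pick_next_sketch(struct entity_sketch *leftmost,
		 struct entity_sketch *last,  /* LAST_BUDDY hint, may be NULL */
		 struct entity_sketch *next,  /* NEXT_BUDDY hint, may be NULL */
		 uint64_t wakeup_gran)
{
	struct entity_sketch *se = leftmost;

	/* LAST_BUDDY: return the CPU to the task that ran before the
	 * wakeup preemption; it likely still has hot cache state. */
	if (last && buddy_is_fair(last, leftmost, wakeup_gran))
		se = last;

	/* NEXT_BUDDY: the task we just woke wins over the last buddy,
	 * since it will probably consume the data the waker touched. */
	if (next && buddy_is_fair(next, leftmost, wakeup_gran))
		se = next;

	return se;
}
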
/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
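
/*
 * Sketch of how a load balancer's "is this task cache hot?" test can
 * honor CACHE_HOT_BUDDY: an entity that is currently someone's next or
 * last buddy is reported hot so it is not migrated away. The helper
 * name and flat parameters are assumptions, not the kernel's task_hot().
 */
static int task_is_cache_hot_sketch(int is_next_or_last_buddy,
				    int dst_cpu_has_work,
				    int feat_cache_hot_buddy)
{
	/* A buddy is about to run here; migrating it would discard its
	 * warm cache footprint, unless the destination is idle anyway. */
	if (feat_cache_hot_buddy && dst_cpu_has_work &&
	    is_next_or_last_buddy)
		return 1;

	return 0;
}
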
/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
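
/*
 * Sketch of the check this feature gates, loosely after the kernel's
 * wakeup_preempt_entity() logic; names and parameters are illustrative.
 * A woken entity preempts current only when it leads by more than one
 * wakeup granularity; with WAKEUP_PREEMPTION off, current always keeps
 * running until its slice is consumed.
 */
#include <stdint.h>

static int should_preempt_sketch(uint64_t curr_vruntime,
				 uint64_t woken_vruntime,
				 uint64_t wakeup_gran,
				 int feat_wakeup_preemption)
{
	int64_t vdiff;

	if (!feat_wakeup_preemption)
		return 0;	/* never preempt at wakeup time */

	vdiff = (int64_t)(curr_vruntime - woken_vruntime);

	/* Require more than one granularity of lead to avoid
	 * over-eager back-and-forth preemption. */
	return vdiff > (int64_t)wakeup_gran;
}
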
/*
 * Use arch-dependent CPU capacity functions
 */
SCHED_FEAT(ARCH_CAPACITY, true)

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)
/*
 * Decrement CPU capacity based on time not spent running tasks
 */
SCHED_FEAT(NONTASK_CAPACITY, true)
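
/*
 * Sketch of NONTASK_CAPACITY-style accounting: the capacity a CPU can
 * offer the fair class is its raw capacity scaled by the fraction of
 * recent time NOT consumed by non-task work (IRQ, RT). The helper name
 * and the unweighted averaging window are simplifying assumptions.
 */
#include <stdint.h>

static unsigned long fair_capacity_sketch(unsigned long raw_capacity,
					  uint64_t window_ns,
					  uint64_t non_task_ns)
{
	uint64_t avail;

	/* Never report zero capacity, even on a CPU monopolized by
	 * non-task work, so it can still be balanced against. */
	if (non_task_ns >= window_ns)
		return 1;

	avail = window_ns - non_task_ns;
	return (unsigned long)((uint64_t)raw_capacity * avail / window_ns);
}
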
/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
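
/*
 * Sketch of the TTWU_QUEUE decision on the wakeup path. The callback
 * parameters stand in for kernel internals and are pure assumptions;
 * the point is only the branch structure: remote, cache-cold targets
 * get the wakeup queued via IPI instead of grabbing their rq->lock.
 */
static void ttwu_queue_sketch(int waker_cpu, int target_cpu,
			      int feat_ttwu_queue,
			      int (*cpus_share_cache)(int a, int b),
			      void (*queue_wakeup_ipi)(int cpu),
			      void (*activate_with_rq_lock)(int cpu))
{
	/* Hand the wakeup to the target CPU: the waker never touches
	 * the remote rq->lock, cutting contention and cacheline bounces. */
	if (feat_ttwu_queue && !cpus_share_cache(waker_cpu, target_cpu)) {
		queue_wakeup_ipi(target_cpu);
		return;
	}

	/* Same cache domain: the lock is cheap, activate directly. */
	activate_with_rq_lock(target_cpu);
}
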
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)

/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * numa_balancing=
 */
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)

/*
 * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
 * higher number of hinting faults are recorded during active load
 * balancing.
 */
SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)

/*
 * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
 * lower number of hinting faults have been recorded. As this has
 * the potential to prevent a task from ever migrating to a new node
 * due to CPU overload, it is disabled by default.
 */
SCHED_FEAT(NUMA_RESIST_LOWER, false)
#endif
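
/*
 * Sketch of how load balancing can consult the two NUMA features above
 * when weighing a cross-node move, by comparing per-node hinting fault
 * counts. A simplified stand-in for the fair.c migration heuristics;
 * the helper name and return convention are assumptions.
 */
#include <stdint.h>

/* Returns 1 if the move improves locality, -1 if it should be
 * resisted, 0 if the fault data expresses no preference. */
static int numa_migration_bias_sketch(uint64_t src_node_faults,
				      uint64_t dst_node_faults,
				      int feat_favour_higher,
				      int feat_resist_lower)
{
	/* NUMA_FAVOUR_HIGHER: pull the task towards the node where more
	 * of its hinting faults were recorded. */
	if (feat_favour_higher && dst_node_faults > src_node_faults)
		return 1;

	/* NUMA_RESIST_LOWER: push back on moves towards a node with
	 * fewer faults; off by default because under CPU overload it
	 * can keep a task from ever migrating to a new node. */
	if (feat_resist_lower && dst_node_faults < src_node_faults)
		return -1;

	return 0;
}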