/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
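
/*
 * Illustrative sketch, not part of this header: roughly how
 * place_entity() in kernel/sched/fair.c of this era consumes the
 * flag when a task wakes up:
 *
 *        if (!initial) {
 *                unsigned long thresh = sysctl_sched_latency;
 *
 *                if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *                        thresh >>= 1;
 *
 *                vruntime -= thresh;
 *        }
 */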
/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)
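
/*
 * Illustrative sketch, simplified from place_entity() in
 * kernel/sched/fair.c: a newly forked task starts one vslice
 * behind min_vruntime instead of preempting immediately:
 *
 *        u64 vruntime = cfs_rq->min_vruntime;
 *
 *        if (initial && sched_feat(START_DEBIT))
 *                vruntime += sched_vslice(cfs_rq, se);
 */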
/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)
/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
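
/*
 * Illustrative sketch, simplified from check_preempt_wakeup() in
 * kernel/sched/fair.c, where both buddy hints are set; @pse is the
 * woken entity, @se the one that was running:
 *
 *        if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
 *                set_next_buddy(pse);
 *        ...
 *        if (sched_feat(LAST_BUDDY) && entity_is_task(se))
 *                set_last_buddy(se);
 */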
/*
 * Consider buddies to be cache hot; this decreases the likelihood of
 * a cache buddy being migrated away and increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
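
/*
 * Illustrative sketch, simplified from task_hot() in
 * kernel/sched/fair.c: buddies report as cache hot, so the load
 * balancer leaves them where they are:
 *
 *        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 *            (&p->se == cfs_rq_of(&p->se)->next ||
 *             &p->se == cfs_rq_of(&p->se)->last))
 *                return 1;
 */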
/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
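
/*
 * Illustrative sketch from check_preempt_wakeup() in
 * kernel/sched/fair.c: with the feature clear, a wakeup never
 * reschedules the running task:
 *
 *        if (unlikely(p->policy != SCHED_NORMAL) ||
 *            !sched_feat(WAKEUP_PREEMPTION))
 *                return;
 */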
/*
 * Use arch-dependent cpu power functions.
 */
SCHED_FEAT(ARCH_POWER, true)
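
/*
 * Illustrative sketch, simplified from update_cpu_power() in
 * kernel/sched/fair.c: when set, the architecture's scaling hooks
 * override the generic defaults:
 *
 *        if (sched_feat(ARCH_POWER))
 *                power *= arch_scale_freq_power(sd, cpu);
 *        else
 *                power *= default_scale_freq_power(sd, cpu);
 */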

/*
 * Drive slice-boundary preemption with a high-resolution timer
 * instead of waiting for the next periodic tick.
 */
SCHED_FEAT(HRTICK, false)

/*
 * Let the periodic tick run its preemption checks even while the
 * hrtick timer is active; see entity_tick().
 */
SCHED_FEAT(DOUBLE_TICK, false)

/*
 * Bias load-balancing decisions with the decayed cpu_load[] averages
 * in source_load()/target_load(), not just the instantaneous load.
 */
SCHED_FEAT(LB_BIAS, true)
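
/*
 * Illustrative sketch of the LB_BIAS path in source_load()
 * (kernel/sched/fair.c); target_load() is symmetric with max():
 *
 *        if (type == 0 || !sched_feat(LB_BIAS))
 *                return total;
 *
 *        return min(rq->cpu_load[type-1], total);
 */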
/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, true)
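
/*
 * Illustrative sketch, simplified from mutex_spin_on_owner() in
 * kernel/sched/core.c; the whole spin loop is gated on this flag:
 *
 *        if (!sched_feat(OWNER_SPIN))
 *                return 0;
 *
 *        while (owner_running(lock, owner)) {
 *                if (need_resched())
 *                        break;
 *                arch_mutex_cpu_relax();
 *        }
 */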
/*
 * Decrement CPU power based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_POWER, true)
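
/*
 * Illustrative sketch from update_rq_clock_task() in
 * kernel/sched/core.c: irq and paravirt steal time feed rq->rt_avg,
 * which scale_rt_power() later uses to shrink the CPU's power:
 *
 *        if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
 *                sched_rt_avg_update(rq, irq_delta + steal);
 */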
/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
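
/*
 * Illustrative sketch, simplified from ttwu_queue() in
 * kernel/sched/core.c: a cross-cache wakeup is queued via IPI
 * instead of taking the remote rq->lock:
 *
 *        if (sched_feat(TTWU_QUEUE) &&
 *            !cpus_share_cache(smp_processor_id(), cpu)) {
 *                ttwu_queue_remote(p, cpu);
 *                return;
 *        }
 */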

/*
 * Force the SD_OVERLAP sched-domain construction path even on
 * topologies without overlapping domains; debugging aid.
 */
SCHED_FEAT(FORCE_SD_OVERLAP, false)

/*
 * Let a runqueue borrow unused rt_runtime from other CPUs in its
 * domain; see balance_runtime() in rt.c.
 */
SCHED_FEAT(RT_RUNTIME_SHARE, true)

/*
 * Skip migrating tasks whose load contribution is negligible unless
 * balancing has already failed; see move_tasks() in fair.c.
 */
SCHED_FEAT(LB_MIN, false)
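
/*
 * Illustrative sketches of the two gates above, simplified from this
 * era's sources. In balance_runtime() (kernel/sched/rt.c):
 *
 *        if (!sched_feat(RT_RUNTIME_SHARE))
 *                return more;
 *
 * In move_tasks() (kernel/sched/fair.c), skipping tiny loads:
 *
 *        if (sched_feat(LB_MIN) && load < 16 &&
 *            !env->sd->nr_balance_failed)
 *                goto next;
 */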
/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * numa_balancing=. Allow PTE scanning to be forced on UMA machines
 * for debugging the core machinery.
 */
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
SCHED_FEAT(NUMA_FORCE, false)
#endif
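
/*
 * Illustrative sketch: under CONFIG_NUMA_BALANCING, sched_feat_numa()
 * in kernel/sched/sched.h wraps sched_feat(), and the NUMA PTE
 * scanner bails out when the feature is clear (exact call sites vary
 * in this era):
 *
 *        if (!sched_feat_numa(NUMA))
 *                return;
 */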