/*
 *  kernel/sched/cpudeadline.c
 *
 *  Global CPU deadline management
 *
 *  Author: Juri Lelli <j.lelli@sssup.it>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; version 2
 *  of the License.
 */
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "cpudeadline.h"
/* Heap index of the parent of node @idx (meaningful only for idx > 0). */
static inline int parent(int idx)
{
	return (idx - 1) >> 1;
}
/* Heap index of the left child of node @idx. */
static inline int left_child(int idx)
{
	return 2 * idx + 1;
}
/* Heap index of the right child of node @idx. */
static inline int right_child(int idx)
{
	return 2 * idx + 2;
}
2014-01-14 04:06:36 +04:00
static void cpudl_exchange ( struct cpudl * cp , int a , int b )
2013-11-07 17:43:47 +04:00
{
int cpu_a = cp - > elements [ a ] . cpu , cpu_b = cp - > elements [ b ] . cpu ;
2014-05-14 18:13:56 +04:00
swap ( cp - > elements [ a ] . cpu , cp - > elements [ b ] . cpu ) ;
swap ( cp - > elements [ a ] . dl , cp - > elements [ b ] . dl ) ;
swap ( cp - > elements [ cpu_a ] . idx , cp - > elements [ cpu_b ] . idx ) ;
2013-11-07 17:43:47 +04:00
}
2016-08-14 17:27:06 +03:00
static void cpudl_heapify_down ( struct cpudl * cp , int idx )
2013-11-07 17:43:47 +04:00
{
int l , r , largest ;
/* adapted from lib/prio_heap.c */
while ( 1 ) {
l = left_child ( idx ) ;
r = right_child ( idx ) ;
largest = idx ;
if ( ( l < cp - > size ) & & dl_time_before ( cp - > elements [ idx ] . dl ,
cp - > elements [ l ] . dl ) )
largest = l ;
if ( ( r < cp - > size ) & & dl_time_before ( cp - > elements [ largest ] . dl ,
cp - > elements [ r ] . dl ) )
largest = r ;
if ( largest = = idx )
break ;
/* Push idx down the heap one level and bump one up */
cpudl_exchange ( cp , largest , idx ) ;
idx = largest ;
}
}
2016-08-14 17:27:06 +03:00
static void cpudl_heapify_up ( struct cpudl * cp , int idx )
2013-11-07 17:43:47 +04:00
{
2016-08-14 17:27:06 +03:00
while ( idx > 0 & & dl_time_before ( cp - > elements [ parent ( idx ) ] . dl ,
cp - > elements [ idx ] . dl ) ) {
cpudl_exchange ( cp , idx , parent ( idx ) ) ;
idx = parent ( idx ) ;
2013-11-07 17:43:47 +04:00
}
}
2016-08-14 17:27:06 +03:00
static void cpudl_heapify ( struct cpudl * cp , int idx )
{
if ( idx > 0 & & dl_time_before ( cp - > elements [ parent ( idx ) ] . dl ,
cp - > elements [ idx ] . dl ) )
cpudl_heapify_up ( cp , idx ) ;
else
cpudl_heapify_down ( cp , idx ) ;
}
2013-11-07 17:43:47 +04:00
static inline int cpudl_maximum ( struct cpudl * cp )
{
return cp - > elements [ 0 ] . cpu ;
}
/*
 * cpudl_find - find the best (later-dl) CPU in the system
 * @cp: the cpudl max-heap context
 * @p: the task
 * @later_mask: a mask to fill in with the selected CPUs (or NULL)
 *
 * Returns: int - best CPU (heap maximum if suitable), or -1 if none fits
 */
int cpudl_find(struct cpudl *cp, struct task_struct *p,
	       struct cpumask *later_mask)
{
	int best_cpu = -1;
	const struct sched_dl_entity *dl_se = &p->dl;

	/*
	 * First choice: any CPU with no -dl task at all, restricted to
	 * @p's affinity.  cpumask_and() returns false when the
	 * intersection is empty, in which case we fall through.
	 */
	if (later_mask &&
	    cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) {
		best_cpu = cpumask_any(later_mask);
		goto out;
	} else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) &&
		   dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
		/*
		 * Fallback: the heap maximum, i.e. the CPU running the
		 * latest-deadline task — usable only if it is in @p's
		 * affinity mask and @p's deadline is earlier than its.
		 */
		best_cpu = cpudl_maximum(cp);
		if (later_mask)
			cpumask_set_cpu(best_cpu, later_mask);
	}

out:
	/* A selected CPU must be present; -1 means "no suitable CPU". */
	WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

	return best_cpu;
}
/*
 * cpudl_set - update the cpudl max-heap
 * @cp: the cpudl max-heap context
 * @cpu: the target cpu
 * @dl: the new earliest deadline for this cpu
 * @is_valid: non-zero to insert/update @cpu with @dl, zero to remove it
 *
 * Notes: assumes cpu_rq(cpu)->lock is locked
 *
 * Returns: (void)
 */
void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
{
	int old_idx, new_cpu;
	unsigned long flags;

	WARN_ON(!cpu_present(cpu));

	raw_spin_lock_irqsave(&cp->lock, flags);

	/* Current heap position of @cpu; IDX_INVALID if not in the heap. */
	old_idx = cp->elements[cpu].idx;
	if (!is_valid) {
		/* remove item */
		if (old_idx == IDX_INVALID) {
			/*
			 * Nothing to remove if old_idx was invalid.
			 * This could happen if a rq_offline_dl is
			 * called for a CPU without -dl tasks running.
			 */
			goto out;
		}
		/*
		 * Classic heap removal: move the last element into the
		 * vacated slot, shrink the heap, patch the reverse
		 * cpu -> index map, then restore the heap property.
		 */
		new_cpu = cp->elements[cp->size - 1].cpu;
		cp->elements[old_idx].dl = cp->elements[cp->size - 1].dl;
		cp->elements[old_idx].cpu = new_cpu;
		cp->size--;
		cp->elements[new_cpu].idx = old_idx;
		cp->elements[cpu].idx = IDX_INVALID;
		cpudl_heapify(cp, old_idx);

		/* @cpu no longer runs a -dl task: mark it free. */
		cpumask_set_cpu(cpu, cp->free_cpus);
		goto out;
	}

	if (old_idx == IDX_INVALID) {
		/* Insert: append at the end, then sift the new entry up. */
		int new_idx = cp->size++;

		cp->elements[new_idx].dl = dl;
		cp->elements[new_idx].cpu = cpu;
		cp->elements[cpu].idx = new_idx;
		cpudl_heapify_up(cp, new_idx);
		cpumask_clear_cpu(cpu, cp->free_cpus);
	} else {
		/* Update in place; cpudl_heapify() picks the direction. */
		cp->elements[old_idx].dl = dl;
		cpudl_heapify(cp, old_idx);
	}

out:
	raw_spin_unlock_irqrestore(&cp->lock, flags);
}
/*
 * cpudl_set_freecpu - Set the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached cpu
 *
 * Marks @cpu as having no -dl task, making it a first-choice candidate
 * in cpudl_find().
 */
void cpudl_set_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_set_cpu(cpu, cp->free_cpus);
}
/*
 * cpudl_clear_freecpu - Clear the cpudl.free_cpus
 * @cp: the cpudl max-heap context
 * @cpu: rd attached cpu
 *
 * Removes @cpu from the free set so cpudl_find() no longer treats it
 * as idle from the deadline scheduler's point of view.
 */
void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
{
	cpumask_clear_cpu(cpu, cp->free_cpus);
}
2013-11-07 17:43:47 +04:00
/*
* cpudl_init - initialize the cpudl structure
* @ cp : the cpudl max - heap context
*/
int cpudl_init ( struct cpudl * cp )
{
int i ;
memset ( cp , 0 , sizeof ( * cp ) ) ;
raw_spin_lock_init ( & cp - > lock ) ;
cp - > size = 0 ;
2014-05-14 18:13:56 +04:00
cp - > elements = kcalloc ( nr_cpu_ids ,
sizeof ( struct cpudl_item ) ,
GFP_KERNEL ) ;
if ( ! cp - > elements )
return - ENOMEM ;
2015-01-19 07:49:36 +03:00
if ( ! zalloc_cpumask_var ( & cp - > free_cpus , GFP_KERNEL ) ) {
2014-05-14 18:13:56 +04:00
kfree ( cp - > elements ) ;
2013-11-07 17:43:47 +04:00
return - ENOMEM ;
2014-05-14 18:13:56 +04:00
}
for_each_possible_cpu ( i )
cp - > elements [ i ] . idx = IDX_INVALID ;
2013-11-07 17:43:47 +04:00
return 0 ;
}
/*
 * cpudl_cleanup - clean up the cpudl structure
 * @cp: the cpudl max-heap context
 *
 * Releases the cpumask and the elements array allocated by cpudl_init().
 */
void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
	kfree(cp->elements);
}