/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
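
/*
 * Spread cpus_per_vec CPUs from nmsk into irqmsk, clearing each chosen CPU
 * from nmsk. Hyperthread siblings of a chosen CPU are picked first so that
 * siblings end up on the same vector.
 */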
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
                                int cpus_per_vec)
{
        const struct cpumask *siblmsk;
        int cpu, sibl;

        for ( ; cpus_per_vec > 0; ) {
                cpu = cpumask_first(nmsk);

                /* Should not happen, but I'm too lazy to think about it */
                if (cpu >= nr_cpu_ids)
                        return;

                cpumask_clear_cpu(cpu, nmsk);
                cpumask_set_cpu(cpu, irqmsk);
                cpus_per_vec--;

                /* If the cpu has siblings, use them first */
                siblmsk = topology_sibling_cpumask(cpu);
                for (sibl = -1; cpus_per_vec > 0; ) {
                        sibl = cpumask_next(sibl, siblmsk);
                        if (sibl >= nr_cpu_ids)
                                break;
                        if (!cpumask_test_and_clear_cpu(sibl, nmsk))
                                continue;
                        cpumask_set_cpu(sibl, irqmsk);
                        cpus_per_vec--;
                }
        }
}
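
/* Allocate one zeroed cpumask for each of the nr_node_ids NUMA nodes. */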
static cpumask_var_t *alloc_node_to_present_cpumask(void)
{
        cpumask_var_t *masks;
        int node;

        masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
        if (!masks)
                return NULL;

        for (node = 0; node < nr_node_ids; node++) {
                if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
                        goto out_unwind;
        }

        return masks;

out_unwind:
        while (--node >= 0)
                free_cpumask_var(masks[node]);
        kfree(masks);
        return NULL;
}
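
/* Free a per-node cpumask array obtained from alloc_node_to_present_cpumask(). */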
static void free_node_to_present_cpumask(cpumask_var_t *masks)
{
        int node;

        for (node = 0; node < nr_node_ids; node++)
                free_cpumask_var(masks[node]);
        kfree(masks);
}
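
/* Mark every present CPU in the cpumask of the node it belongs to. */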
static void build_node_to_present_cpumask(cpumask_var_t *masks)
{
        int cpu;

        for_each_present_cpu(cpu)
                cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
                                const struct cpumask *mask, nodemask_t *nodemsk)
{
        int n, nodes = 0;

        /* Calculate the number of nodes in the supplied affinity mask */
        for_each_node(n) {
                if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
                        node_set(n, *nodemsk);
                        nodes++;
                }
        }
        return nodes;
}

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
        int n, nodes, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
        struct cpumask *masks;
        cpumask_var_t nmsk, *node_to_present_cpumask;

        /*
         * If there aren't any vectors left after applying the pre/post
         * vectors don't bother with assigning affinity.
         */
        if (!affv)
                return NULL;

        if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
                return NULL;

        masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                goto out;

        node_to_present_cpumask = alloc_node_to_present_cpumask();
        if (!node_to_present_cpumask)
                goto out;

        /* Fill out vectors at the beginning that don't need affinity */
        for (curvec = 0; curvec < affd->pre_vectors; curvec++)
                cpumask_copy(masks + curvec, irq_default_affinity);

        /* Stabilize the cpumasks */
        get_online_cpus();
        build_node_to_present_cpumask(node_to_present_cpumask);
        nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
                                     &nodemsk);

        /*
         * If the number of nodes in the mask is greater than or equal to the
         * number of vectors we just spread the vectors across the nodes.
         */
        if (affv <= nodes) {
                for_each_node_mask(n, nodemsk) {
                        cpumask_copy(masks + curvec,
                                     node_to_present_cpumask[n]);
                        if (++curvec == last_affv)
                                break;
                }
                goto done;
        }

        for_each_node_mask(n, nodemsk) {
                int ncpus, v, vecs_to_assign, vecs_per_node;

                /* Spread the vectors per node */
                vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;

                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);

                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);
                vecs_to_assign = min(vecs_per_node, ncpus);

                /* Account for rounding errors */
                extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
                        cpus_per_vec = ncpus / vecs_to_assign;

                        /* Account for extra vectors to compensate for rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
                                --extra_vecs;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }

                if (curvec >= last_affv)
                        break;
                --nodes;
        }

done:
        put_online_cpus();

        /* Fill out vectors at the end that don't need affinity */
        for (; curvec < nvecs; curvec++)
                cpumask_copy(masks + curvec, irq_default_affinity);
        free_node_to_present_cpumask(node_to_present_cpumask);
out:
        free_cpumask_var(nmsk);
        return masks;
}
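
/*
 * Example usage (illustrative sketch, not taken from a caller in this file):
 * a multiqueue driver that reserves one vector for an admin queue could
 * describe its requirements as
 *
 *	static const struct irq_affinity affd = { .pre_vectors = 1 };
 *	struct cpumask *masks = irq_create_affinity_masks(nvecs, &affd);
 *
 * masks[0] then keeps the default affinity while masks[1..nvecs-1] are
 * spread across the present CPUs; the array is freed with kfree() once the
 * vectors have been set up.
 */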

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
{
        int resv = affd->pre_vectors + affd->post_vectors;
        int vecs = maxvec - resv;
        int ret;

        if (resv > minvec)
                return 0;

        get_online_cpus();
        ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
        put_online_cpus();
        return ret;
}
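
/*
 * Worked example (hypothetical numbers): with affd->pre_vectors = 1,
 * affd->post_vectors = 0, minvec = 2 and maxvec = 32 on a machine with
 * 16 present CPUs, resv = 1 and vecs = 31, so the function returns
 * min(16, 31) + 1 = 17 vectors.
 */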