#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
                                int cpus_per_vec)
{
        const struct cpumask *siblmsk;
        int cpu, sibl;

        for ( ; cpus_per_vec > 0; ) {
                cpu = cpumask_first(nmsk);

                /* Should not happen, but I'm too lazy to think about it */
                if (cpu >= nr_cpu_ids)
                        return;

                cpumask_clear_cpu(cpu, nmsk);
                cpumask_set_cpu(cpu, irqmsk);
                cpus_per_vec--;

                /* If the cpu has siblings, use them first */
                siblmsk = topology_sibling_cpumask(cpu);
                for (sibl = -1; cpus_per_vec > 0; ) {
                        sibl = cpumask_next(sibl, siblmsk);
                        if (sibl >= nr_cpu_ids)
                                break;
                        if (!cpumask_test_and_clear_cpu(sibl, nmsk))
                                continue;
                        cpumask_set_cpu(sibl, irqmsk);
                        cpus_per_vec--;
                }
        }
}
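
/*
 * Illustrative sketch (not part of the original file): spreading one
 * node's CPUs across two vectors. Assuming nmsk holds CPUs 0-3 where
 * 0/1 and 2/3 are hyperthread siblings, the first call moves {0, 1}
 * into masks[0] and the second moves {2, 3} into masks[1], so siblings
 * end up serviced by the same vector.
 */
static void __maybe_unused irq_spread_example(struct cpumask *masks,
                                              struct cpumask *nmsk)
{
        irq_spread_init_one(&masks[0], nmsk, 2);        /* consumes CPUs 0, 1 */
        irq_spread_init_one(&masks[1], nmsk, 2);        /* consumes CPUs 2, 3 */
}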

static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
{
        int n, nodes = 0;

        /* Calculate the number of nodes in the supplied affinity mask */
        for_each_online_node(n) {
                if (cpumask_intersects(mask, cpumask_of_node(n))) {
                        node_set(n, *nodemsk);
                        nodes++;
                }
        }
        return nodes;
}
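
/*
 * Illustrative sketch (not part of the original file): on a two-node
 * box with CPUs 0-7 on node 0 and CPUs 8-15 on node 1, passing the
 * full online mask sets bits 0 and 1 in the node mask and returns 2;
 * a mask covering only CPUs 0-7 would return 1.
 */
static int __maybe_unused count_online_nodes_example(void)
{
        nodemask_t nodemsk = NODE_MASK_NONE;

        return get_nodes_in_cpumask(cpu_online_mask, &nodemsk);
}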

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
        int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
        int affv = nvecs - affd->pre_vectors - affd->post_vectors;
        int last_affv = affv + affd->pre_vectors;
        nodemask_t nodemsk = NODE_MASK_NONE;
        struct cpumask *masks;
        cpumask_var_t nmsk;

        if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
                return NULL;

        masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                goto out;

        /* Fill out vectors at the beginning that don't need affinity */
        for (curvec = 0; curvec < affd->pre_vectors; curvec++)
                cpumask_copy(masks + curvec, irq_default_affinity);

        /* Stabilize the cpumasks */
        get_online_cpus();
        nodes = get_nodes_in_cpumask(cpu_online_mask, &nodemsk);

        /*
         * If the number of nodes in the mask is greater than or equal to the
         * number of vectors we just spread the vectors across the nodes.
         */
        if (affv <= nodes) {
                for_each_node_mask(n, nodemsk) {
                        cpumask_copy(masks + curvec, cpumask_of_node(n));
                        if (++curvec == last_affv)
                                break;
                }
                goto done;
        }

        /* Spread the vectors per node */
        vecs_per_node = affv / nodes;
        /* Account for rounding errors */
        extra_vecs = affv - (nodes * vecs_per_node);
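
        /*
         * Worked example (illustration only): affv = 10 spreadable vectors
         * over nodes = 3 gives vecs_per_node = 3 and extra_vecs = 1; the
         * leftover is absorbed in the loop below by briefly growing
         * cpus_per_vec and then bumping vecs_per_node for later nodes.
         */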
        for_each_node_mask(n, nodemsk) {
                int ncpus, v, vecs_to_assign = vecs_per_node;

                /* Get the cpus on this node which are in the mask */
                cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));

                /* Calculate the number of cpus per vector */
                ncpus = cpumask_weight(nmsk);

                for (v = 0; curvec < last_affv && v < vecs_to_assign;
                     curvec++, v++) {
                        cpus_per_vec = ncpus / vecs_to_assign;

                        /* Account for extra vectors to compensate rounding errors */
                        if (extra_vecs) {
                                cpus_per_vec++;
                                if (!--extra_vecs)
                                        vecs_per_node++;
                        }
                        irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
                }

                if (curvec >= last_affv)
                        break;
        }

done:
        put_online_cpus();

        /* Fill out vectors at the end that don't need affinity */
        for ( ; curvec < nvecs; curvec++)
                cpumask_copy(masks + curvec, irq_default_affinity);
out:
        free_cpumask_var(nmsk);
        return masks;
}
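
/*
 * Usage sketch (hypothetical caller; in-tree users reach this through
 * the MSI setup path): reserve one pre-vector that keeps the default
 * affinity and spread the remaining seven vectors over the nodes.
 */
static void __maybe_unused create_masks_example(void)
{
        struct irq_affinity affd = { .pre_vectors = 1, .post_vectors = 0 };
        struct cpumask *masks;

        masks = irq_create_affinity_masks(8, &affd);
        if (!masks)
                return;
        /* masks[0] is irq_default_affinity; masks[1..7] are spread */
        kfree(masks);
}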

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 *
 * Returns the number of online CPUs plus the reserved pre/post vectors,
 * limited to @maxvec.
 */
int irq_calc_affinity_vectors(int maxvec, const struct irq_affinity *affd)
{
        int resv = affd->pre_vectors + affd->post_vectors;
        int vecs = maxvec - resv;
        int cpus;

        /* Stabilize the cpumasks */
        get_online_cpus();
        cpus = cpumask_weight(cpu_online_mask);
        put_online_cpus();

        return min(cpus, vecs) + resv;
}
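
/*
 * Worked example (illustration only): with maxvec = 32, one reserved
 * pre-vector and 16 CPUs online, vecs = 31 and the result is
 * min(16, 31) + 1 = 17, i.e. one spreadable vector per online CPU
 * plus the reserved one.
 */
static int __maybe_unused calc_vectors_example(void)
{
        struct irq_affinity affd = { .pre_vectors = 1, .post_vectors = 0 };

        return irq_calc_affinity_vectors(32, &affd);
}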