// SPDX-License-Identifier: GPL-2.0
/*
 * Generic cpu hotunplug interrupt migration code copied from the
 * arch/arm implementation
 *
 * Copyright (C) Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>

#include "internals.h"
/* For !GENERIC_IRQ_EFFECTIVE_AFF_MASK this looks at general affinity mask */
static inline bool irq_needs_fixup(struct irq_data *d)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(d);
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	/*
	 * The cpumask_empty() check is a workaround for interrupt chips,
	 * which do not implement effective affinity, but the architecture has
	 * enabled the config switch. Use the general affinity mask instead.
	 */
	if (cpumask_empty(m))
		m = irq_data_get_affinity_mask(d);

	/*
	 * Sanity check. If the mask is not empty when excluding the outgoing
	 * CPU then it must contain at least one online CPU. The outgoing CPU
	 * has been removed from the online mask already.
	 */
	if (cpumask_any_but(m, cpu) < nr_cpu_ids &&
	    cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If this happens then there was a missed IRQ fixup at some
		 * point. Warn about it and enforce fixup.
		 */
		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
			cpumask_pr_args(m), d->irq, cpu);
		return true;
	}
#endif
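	/* A fixup is needed if the outgoing CPU is part of the mask */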
	return cpumask_test_cpu(cpu, m);
}

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);
	const struct cpumask *affinity;
	bool brokeaff = false;
	int err;

	/*
	 * IRQ chip might be already torn down, but the irq descriptor is
	 * still in the radix tree. Also if the chip has no affinity setter,
	 * nothing can be done here.
	 */
	if (!chip || !chip->irq_set_affinity) {
		pr_debug("IRQ %u: Unable to migrate away\n", d->irq);
		return false;
	}

	/*
	 * No move required, if:
	 *  - Interrupt is per cpu
	 *  - Interrupt is not started
	 *  - Affinity mask does not include this CPU.
	 *
	 * Note: Do not check desc->action as this might be a chained
	 * interrupt.
	 */
	if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {
		/*
		 * If an irq move is pending, abort it if the dying CPU is
		 * the sole target.
		 */
		irq_fixup_move_pending(desc, false);
		return false;
	}

	/*
	 * Complete an eventually pending irq move cleanup. If this
	 * interrupt was moved in hard irq context, then the vectors need
	 * to be cleaned up. It can't wait until this interrupt actually
	 * happens and this CPU was involved.
	 */
	irq_force_complete_move(desc);

	/*
	 * If there is a setaffinity pending, then try to reuse the pending
	 * mask, so the last change of the affinity does not get lost. If
	 * there is no move pending or the pending mask does not contain
	 * any online CPU, use the current affinity mask.
	 */
	if (irq_fixup_move_pending(desc, true))
		affinity = irq_desc_get_pending_mask(desc);
	else
		affinity = irq_data_get_affinity_mask(d);

	/* Mask the chip for interrupts which cannot move in process context */
	if (maskchip && chip->irq_mask)
		chip->irq_mask(d);

	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * If the interrupt is managed, then shut it down and leave
		 * the affinity untouched.
		 */
		if (irqd_affinity_is_managed(d)) {
			irqd_set_managed_shutdown(d);
			irq_shutdown(desc);
			return false;
		}
		affinity = cpu_online_mask;
		brokeaff = true;
	}

	/*
	 * Do not set the force argument of irq_do_set_affinity() as this
	 * disables the masking of offline CPUs from the supplied affinity
	 * mask and therefore might keep/reassign the irq to the outgoing
	 * CPU.
	 */
	err = irq_do_set_affinity(d, affinity, false);
	if (err) {
		pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
				    d->irq, err);
		brokeaff = false;
	}

	if (maskchip && chip->irq_unmask)
		chip->irq_unmask(d);

	return brokeaff;
}
/**
 * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu
 *
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void irq_migrate_all_off_this_cpu(void)
{
	struct irq_desc *desc;
	unsigned int irq;

	for_each_active_irq(irq) {
		bool affinity_broken;

		desc = irq_to_desc(irq);
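		/* migrate_one_irq() requires desc->lock to be held */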
		raw_spin_lock(&desc->lock);
		affinity_broken = migrate_one_irq(desc);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken) {
			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
					    irq, smp_processor_id());
		}
	}
}

static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

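	/*
	 * Only managed interrupts which have an action, a valid chip and
	 * the upcoming CPU in their affinity mask need to be restored.
	 */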
	if (!irqd_affinity_is_managed(data) || !desc->action ||
	    !irq_data_get_irq_chip(data) || !cpumask_test_cpu(cpu, affinity))
		return;

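	/*
	 * The interrupt was shut down on CPU unplug because no online CPU
	 * was left in its affinity mask. Start it up again on this CPU.
	 */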
	if (irqd_is_managed_and_shutdown(data)) {
		irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		return;
	}

	/*
	 * If the interrupt can only be directed to a single target
	 * CPU then it is already assigned to a CPU in the affinity
	 * mask. No point in trying to move it around.
	 */
	if (!irqd_is_single_target(data))
		irq_set_affinity_locked(data, affinity, false);
}
/**
 * irq_affinity_online_cpu - Restore affinity for managed interrupts
 * @cpu:	Upcoming CPU for which interrupts should be restored
 */
int irq_affinity_online_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_lock_sparse();
	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		raw_spin_lock_irq(&desc->lock);
		irq_restore_affinity_of_irq(desc, cpu);
		raw_spin_unlock_irq(&desc->lock);
	}
	irq_unlock_sparse();
	return 0;
}