genirq: Wake interrupt threads immediately when changing affinity

The affinity setting of interrupt threads happens in the context of the
thread when the thread is woken up by a hard interrupt. As this can be an
arbitrarily long time after the affinity change, the thread can become
runnable on an isolated CPU and cause isolation disruption.

Avoid this by checking for a pending affinity change request in
irq_wait_for_interrupt() and waking the threads immediately when the
affinity is modified.

Note that this is of most benefit on systems where the interrupt affinity
itself does not need to be deferred to the interrupt handler, but even
where that's not the case, the total disruption will be less.

Signed-off-by: Crystal Wood <crwood@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240122235353.15235-1-crwood@redhat.com
Crystal Wood 2024-01-22 17:53:53 -06:00 committed by Thomas Gleixner
parent ee4c1592b7
commit c99303a2d2
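
As a minimal illustration of the pattern the patch below adopts (the requester
sets a flag and wakes the thread immediately; the thread re-checks the flag
every time around its wait loop), here is a hedged userspace analogue built on
pthreads. It is a sketch only: the worker, request_affinity_change and
apply_affinity_change names are invented for this example and are not part of
the kernel code touched by the commit.

/* Illustrative userspace analogue, not kernel code. Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static bool work_pending;
static bool affinity_change_requested;
static bool stop;

/* Stand-in for applying the new cpumask (set_cpus_allowed_ptr() in the kernel). */
static void apply_affinity_change(void)
{
	printf("worker: applying new affinity immediately\n");
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		/* Like irq_wait_for_interrupt(): check the request on every iteration. */
		if (affinity_change_requested) {
			affinity_change_requested = false;
			apply_affinity_change();
		}
		if (work_pending) {
			work_pending = false;
			printf("worker: handling work item\n");
			continue;
		}
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* Like irq_set_thread_affinity(): set the flag, then wake the thread right away. */
static void request_affinity_change(void)
{
	pthread_mutex_lock(&lock);
	affinity_change_requested = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	sleep(1);
	request_affinity_change();	/* applied promptly, no work item needed */
	sleep(1);

	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&wake);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}

In the kernel itself the request typically originates from a write to
/proc/irq/<N>/smp_affinity, which ends up in irq_set_thread_affinity() shown in
the first hunk; with this patch the woken thread applies the new mask in
irq_wait_for_interrupt() instead of waiting for the next hard interrupt.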


@@ -192,10 +192,14 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	struct irqaction *action;

 	for_each_action_of_desc(desc, action) {
-		if (action->thread)
+		if (action->thread) {
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		if (action->secondary && action->secondary->thread)
+			wake_up_process(action->thread);
+		}
+		if (action->secondary && action->secondary->thread) {
 			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
+			wake_up_process(action->secondary->thread);
+		}
 	}
 }
@@ -1049,10 +1053,57 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }

-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+	bool valid = false;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	__set_current_state(TASK_RUNNING);
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	raw_spin_lock_irq(&desc->lock);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (cpumask_available(desc->irq_common_data.affinity)) {
+		const struct cpumask *m;
+
+		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+		cpumask_copy(mask, m);
+		valid = true;
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+static int irq_wait_for_interrupt(struct irq_desc *desc,
+				  struct irqaction *action)
 {
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
+		irq_thread_check_affinity(desc, action);

 		if (kthread_should_stop()) {
 			/* may need to run one last time */
@@ -1129,52 +1180,6 @@ out_unlock:
 	chip_bus_sync_unlock(desc);
 }

-#ifdef CONFIG_SMP
-/*
- * Check whether we need to change the affinity of the interrupt thread.
- */
-static void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
-{
-	cpumask_var_t mask;
-	bool valid = true;
-
-	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
-		return;
-
-	/*
-	 * In case we are out of memory we set IRQTF_AFFINITY again and
-	 * try again next time
-	 */
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		return;
-	}
-
-	raw_spin_lock_irq(&desc->lock);
-	/*
-	 * This code is triggered unconditionally. Check the affinity
-	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
-	 */
-	if (cpumask_available(desc->irq_common_data.affinity)) {
-		const struct cpumask *m;
-
-		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
-		cpumask_copy(mask, m);
-	} else {
-		valid = false;
-	}
-	raw_spin_unlock_irq(&desc->lock);
-
-	if (valid)
-		set_cpus_allowed_ptr(current, mask);
-	free_cpumask_var(mask);
-}
-#else
-static inline void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
-#endif
-
 /*
  * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
@@ -1312,13 +1317,9 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, TWA_NONE);

-	irq_thread_check_affinity(desc, action);
-
-	while (!irq_wait_for_interrupt(action)) {
+	while (!irq_wait_for_interrupt(desc, action)) {
 		irqreturn_t action_ret;

-		irq_thread_check_affinity(desc, action);
-
 		action_ret = handler_fn(desc, action);
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);