/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
*/
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
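
/*
 * An architecture that wants queued work to run promptly is expected to
 * override the weak hook above from its arch code, typically by raising
 * some form of self-interrupt whose handler ends up calling
 * irq_work_run(); without an override, pending work is simply picked up
 * from the timer tick, as the comment above notes. An illustrative
 * handler sketch follows irq_work_run() below.
 */
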
/*
 * Queue the entry and raise the IPI if needed.
*/
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry. Returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
*/
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
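
/*
 * Usage sketch (illustrative only: my_data, my_work_func and process()
 * are made-up names, not part of this file or of <linux/irq_work.h>).
 * A caller that must defer work out of NMI or other restricted context
 * embeds a struct irq_work in its own data, initialises it once with
 * init_irq_work(), and queues it from the restricted context; the
 * callback later runs from irq_work_run() in hardirq context with IRQs
 * disabled.
 *
 *	struct my_data {
 *		struct irq_work work;
 *		...
 *	};
 *
 *	static void my_work_func(struct irq_work *work)
 *	{
 *		struct my_data *d = container_of(work, struct my_data, work);
 *
 *		process(d);	// hardirq-safe processing only
 *	}
 *
 *	// once, at setup time:
 *	init_irq_work(&d->work, my_work_func);
 *
 *	// from NMI/hardirq context, as often as needed:
 *	irq_work_queue(&d->work);
 */
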
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
*/
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		xchg(&work->flags, IRQ_WORK_BUSY);

		work->func(work);

		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
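
/*
 * Arch-side sketch (illustrative only: the handler name and the exact
 * irq_enter()/irq_exit() bookkeeping are assumptions, as the real entry
 * code is architecture specific). The self-interrupt raised by
 * arch_irq_work_raise() is expected to land in a hardirq handler that
 * does little more than call irq_work_run(), so the BUG_ON() checks
 * above (in_irq() and irqs_disabled()) hold:
 *
 *	void my_arch_irq_work_interrupt(void)
 *	{
 *		irq_enter();
 *		irq_work_run();
 *		irq_exit();
 *	}
 */
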
/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
*/
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
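
/*
 * Teardown sketch (illustrative only: kfree() and the my_data layout from
 * the usage sketch above are assumptions about the caller, not this
 * file's API). Because the callback may still be executing after the work
 * has been dequeued, a user would typically wait for it with
 * irq_work_sync(), called with IRQs enabled (see the WARN_ON_ONCE above),
 * before freeing the containing object:
 *
 *	irq_work_sync(&d->work);
 *	kfree(d);
 */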