/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
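
/*
 * Illustrative usage sketch (my_func and my_work are made-up names, not
 * part of this file): a user embeds a struct irq_work, points ->func at a
 * callback and queues it, typically from NMI or hardirq context:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		... runs later, from hardirq context, via irq_work_run() ...
 *	}
 *
 *	static struct irq_work my_work = { .func = my_func };
 *
 *	irq_work_queue(&my_work);
 */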

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}
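
/*
 * Note on the claim above: a successful claim sets both PENDING and BUSY
 * (IRQ_WORK_FLAGS) in a single cmpxchg(), covering the free->claimed and
 * busy->claimed rows of the state table at the top of this file. Since
 * only PENDING is tested, a work item whose callback is currently running
 * (BUSY set, PENDING clear) can be claimed and queued again.
 */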

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the callback from the timer tick.
	 */
}
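
/*
 * Architectures that can send a self-interrupt override the weak stub
 * above so that irq_work_run() is entered soon after queueing; without an
 * override, queued work is only picked up the next time irq_work_run() is
 * called from the timer tick path.
 */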

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work, returns true on success, failure when the
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
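
/*
 * The plain store of IRQ_WORK_BUSY in the loop above is safe: while
 * PENDING is set, irq_work_claim() refuses to touch the entry, so nothing
 * else modifies ->flags at that point. Once PENDING is cleared the entry
 * may be claimed and queued again, even from within its own callback.
 */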

/*
 * Synchronize against the irq_work @work, ensures the @work is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
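
/*
 * Illustrative usage sketch (obj and obj->work are hypothetical names):
 * before freeing an object that embeds a struct irq_work, make sure its
 * callback is no longer running:
 *
 *	irq_work_sync(&obj->work);
 *	kfree(obj);
 *
 * As the WARN_ON_ONCE() above indicates, this spins with cpu_relax()
 * until the BUSY bit clears, so call it with interrupts enabled.
 */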