/* Run-once support for DO_ONCE() (see <linux/once.h>):
 * __do_once_start()/__do_once_done() plus deferred static-key disabling.
 */
# include <linux/slab.h>
# include <linux/spinlock.h>
# include <linux/once.h>
# include <linux/random.h>
/* Bookkeeping for deferred disabling of a DO_ONCE() static key;
 * allocated in once_disable_jump() and freed by once_deferred().
 */
struct once_work {
	struct work_struct work;	/* queued on the system workqueue */
	struct static_key *key;		/* key to disable when the work runs */
};
static void once_deferred ( struct work_struct * w )
2015-10-08 02:20:35 +03:00
{
2015-10-08 02:20:36 +03:00
struct once_work * work ;
2015-10-08 02:20:35 +03:00
2015-10-08 02:20:36 +03:00
work = container_of ( w , struct once_work , work ) ;
2015-10-08 02:20:35 +03:00
BUG_ON ( ! static_key_enabled ( work - > key ) ) ;
static_key_slow_dec ( work - > key ) ;
kfree ( work ) ;
}
static void once_disable_jump ( struct static_key * key )
2015-10-08 02:20:35 +03:00
{
2015-10-08 02:20:36 +03:00
struct once_work * w ;
2015-10-08 02:20:35 +03:00
w = kmalloc ( sizeof ( * w ) , GFP_ATOMIC ) ;
if ( ! w )
return ;
2015-10-08 02:20:36 +03:00
INIT_WORK ( & w - > work , once_deferred ) ;
2015-10-08 02:20:35 +03:00
w - > key = key ;
schedule_work ( & w - > work ) ;
}
static DEFINE_SPINLOCK ( once_lock ) ;
/* Enter a DO_ONCE() region: take once_lock (IRQs saved in *flags) and
 * test whether the region has already run.  Returns true with the lock
 * held when the caller should execute the once-only code; returns false
 * with the lock already dropped when the region has run before.
 */
bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);

	if (!*done)
		return true;

	spin_unlock_irqrestore(&once_lock, *flags);
	/* Keep sparse happy by restoring an even lock count on
	 * this lock.  In case we return here, we don't call into
	 * __do_once_done but return early in the DO_ONCE() macro.
	 */
	__acquire(once_lock);
	return false;
}
EXPORT_SYMBOL(__do_once_start);
/* Leave a DO_ONCE() region after the once-only code ran: mark the region
 * done while still holding once_lock (taken by __do_once_start()), drop
 * the lock, then queue deferred disabling of the region's static key.
 */
void __do_once_done(bool *done, struct static_key *once_key,
		    unsigned long *flags)
	__releases(once_lock)
{
	/* Set under once_lock so concurrent __do_once_start() callers
	 * observe the region as completed.
	 */
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	/* Deferred via workqueue (see once_disable_jump()); must run
	 * after the lock is released.
	 */
	once_disable_jump(once_key);
}
EXPORT_SYMBOL(__do_once_done);