/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>
#include <linux/smp_lock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bkl.h>

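/*
 * Note: the CREATE_TRACE_POINTS definition above, together with the
 * include of <trace/events/bkl.h>, instantiates the tracepoints fired
 * via trace_lock_kernel()/trace_unlock_kernel() further down.
 */
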
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag);

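/*
 * Illustrative sketch only (not code in this file): callers use the
 * lock_kernel()/unlock_kernel() wrappers from <linux/smp_lock.h>
 * around legacy, BKL-protected sections, e.g.
 *
 *	lock_kernel();
 *	... touch legacy state, possibly sleeping ...
 *	unlock_kernel();
 *
 * Calls may nest; only the outermost unlock_kernel() drops the
 * spinlock, and the lock is transparently dropped and reacquired
 * across schedule().
 */
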
/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - do_raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!do_raw_spin_trylock(&kernel_flag)) {
		if (need_resched())
			return -EAGAIN;
		cpu_relax();
	}

	preempt_disable();
	return 0;
}
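
/*
 * Release the underlying lock from the scheduler: drop the spinlock
 * and decrement the preemption count without forcing a reschedule
 * from here.
 */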
void __lockfunc __release_kernel_lock(void)
{
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (ie UP preemption), this all goes away because the
 * do_raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!do_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			do_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (raw_spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!do_raw_spin_trylock(&kernel_flag));
	}
}

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	do_raw_spin_lock(&kernel_flag);
}
#endif

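/*
 * Common unlock path for both variants above: drop the spinlock and
 * re-enable preemption.
 */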
static inline void __unlock_kernel(void)
{
	/*
	 * the BKL is not covered by lockdep, so we open-code the
	 * unlocking sequence (and thus avoid the dep-chain ops):
	 */
	do_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc _lock_kernel(const char *func, const char *file, int line)
{
	int depth = current->lock_depth + 1;

	trace_lock_kernel(func, file, line);

	if (likely(!depth)) {
		might_sleep();
		__lock_kernel();
	}

	current->lock_depth = depth;
}
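
/*
 * Releasing the big kernel lock: only when the outermost unlock takes
 * lock_depth back below zero is the spinlock actually dropped.
 */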
void __lockfunc _unlock_kernel(const char *func, const char *file, int line)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();

	trace_unlock_kernel(func, file, line);
}

EXPORT_SYMBOL(_lock_kernel);
EXPORT_SYMBOL(_unlock_kernel);