/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (but it's fine to use them from there).
 * Could be added though, just undo lg_lock_init.
 */

void lg_lock_init(struct lglock *lg, char *name)
{
	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);

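/*
 * Usage sketch (illustrative only, not part of this file): an lglock is
 * normally defined at file scope with DEFINE_LGLOCK() (or
 * DEFINE_STATIC_LGLOCK()) from include/linux/lglock.h, which statically
 * initializes the per-CPU spinlocks; lg_lock_init() then only wires up
 * lockdep. The name "example_lglock" is hypothetical.
 *
 *	DEFINE_LGLOCK(example_lglock);
 *
 *	static int __init example_init(void)
 *	{
 *		lg_lock_init(&example_lglock, "example_lglock");
 *		return 0;
 *	}
 */
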
void lg_local_lock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock);

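/*
 * Usage sketch (illustrative, hypothetical names): the common fast path
 * takes only the calling CPU's spinlock, so fast-path callers on
 * different CPUs never contend with each other.
 *
 *	lg_local_lock(&example_lglock);
 *	... modify this CPU's slice of the protected per-CPU data ...
 *	lg_local_unlock(&example_lglock);
 */
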
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);

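/*
 * Usage sketch (illustrative, hypothetical names): the _cpu variants
 * nominate a specific CPU's lock, e.g. to touch another CPU's slice of
 * the protected data; lock and unlock must name the same cpu.
 *
 *	lg_local_lock_cpu(&example_lglock, cpu);
 *	... access cpu's slice of the protected per-CPU data ...
 *	lg_local_unlock_cpu(&example_lglock, cpu);
 */
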
/*
 * lg_double_lock()/lg_double_unlock() were added by
 * "sched/stop_machine: Fix deadlock between multiple stop_two_cpus()":
 *
 * Jiri reported a machine stuck in multi_cpu_stop() with
 * migrate_swap_stop() as function and with the following src,dst cpu
 * pairs: {11, 4} {13, 11} { 4, 13}
 *
 *		4	11	13
 *
 *	cpuM: queue(4, 13)
 *		*Ma
 *	cpuN: queue(13, 11)
 *			*N	Na
 *		*M		Mb
 *	cpuO: queue(11, 4)
 *			*O	Oa
 *			*Nb
 *		*Ob
 *
 * where *X denotes the cpu running the queueing of cpu-X and X[ab]
 * denotes the first/second queued work.
 *
 * The top of the workqueue for each of cpus 4, 11 and 13 ends up being
 * work from cpus M, O and N respectively; each top work waits on its
 * partner, which sits second in another queue. IOW, a deadlock.
 *
 * Do away with the queueing trickery and introduce lg_double_lock() to
 * lock both CPUs and fully serialize the stop_two_cpus() callers,
 * instead of the partial (and buggy) serialization we had before.
 */
void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
{
	BUG_ON(cpu1 == cpu2);

	/* lock in cpu order, just like lg_global_lock */
	if (cpu2 < cpu1)
		swap(cpu1, cpu2);

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
}

void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
	preempt_enable();
}

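/*
 * Simplified sketch of the stop_two_cpus() usage described above (see
 * kernel/stop_machine.c for the real code): both CPUs' locks are held
 * across both queueing operations, so two stop_two_cpus() callers can
 * no longer interleave their queueing and recreate the cycle in the
 * diagram above.
 *
 *	lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
 *	cpu_stop_queue_work(cpu1, &work1);
 *	cpu_stop_queue_work(cpu2, &work2);
 *	lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
 */
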
void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_lock(lock);
	}
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
	int i;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_unlock(lock);
	}
	preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);
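
/*
 * Usage sketch (illustrative, hypothetical names): the classic lglock
 * pattern pairs frequent, cheap per-CPU critical sections with a rare
 * global pass that must see a consistent view of every CPU's slice.
 *
 *	Frequent path, on any CPU:
 *		lg_local_lock(&example_lglock);
 *		... touch this CPU's slice ...
 *		lg_local_unlock(&example_lglock);
 *
 *	Rare path, excludes all of the above at once:
 *		lg_global_lock(&example_lglock);
 *		for_each_possible_cpu(i)
 *			... walk cpu i's slice ...
 *		lg_global_unlock(&example_lglock);
 */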