/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR)
#include <asm/hvcall.h>
#include <asm/smp.h>

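/*
 * If the lock is held and the holding virtual CPU appears to be
 * preempted by the hypervisor (its yield count is odd), confer the
 * rest of our time slice to it with the H_CONFER hcall so that it
 * can run and release the lock sooner.
 */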
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
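	/* lppaca fields are stored big-endian */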
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
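	/* order the yield_count read before re-checking the lock word */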
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
EXPORT_SYMBOL_GPL(__spin_yield);

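/*
 * A rough sketch of how the spinlock slow path in asm/spinlock.h is
 * expected to call __spin_yield(); the helper names used here
 * (__arch_spin_trylock, SHARED_PROCESSOR, HMT_low/HMT_medium) are
 * illustrative and may differ between kernel versions:
 *
 *	while (__arch_spin_trylock(lock) != 0) {
 *		do {
 *			HMT_low();
 *			if (SHARED_PROCESSOR)
 *				__spin_yield(lock);
 *		} while (lock->slock != 0);
 *		HMT_medium();
 *	}
 */
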
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
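/*
 * A negative lock word means the rwlock is write-locked; the low 16 bits
 * of the lock token identify the logical CPU holding it, so we can confer
 * our time slice to that virtual CPU just as __spin_yield() does.
 */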
void __rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	plpar_hcall_norets(H_CONFER,
		get_hard_smp_processor_id(holder_cpu), yield_count);
}
#endif