/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/stringify.h>
#include <linux/smp.h>

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#include <asm/smp.h>
#include <asm/firmware.h>
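
/*
 * On a shared-processor LPAR the virtual cpu holding a lock may have
 * been preempted by the hypervisor, so spinning on it can be futile.
 * The helpers below pull the holder's cpu number out of the low 16
 * bits of the lock word and confer our cycles to it: via HvCall2() on
 * legacy iSeries, or via the H_CONFER hcall on SPLPAR.  The holder's
 * lppaca yield_count is even while it is running and odd while it is
 * preempted, so the parity check below avoids a pointless hypervisor
 * call.
 */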
void __spin_yield(arch_spinlock_t *lock)
{
	unsigned int lock_value, holder_cpu, yield_count;

	lock_value = lock->slock;
	if (lock_value == 0)
		return;
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = lppaca_of(holder_cpu).yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (lock->slock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
void __rw_yield(arch_rwlock_t *rw)
{
	int lock_value;
	unsigned int holder_cpu, yield_count;

	lock_value = rw->lock;
	if (lock_value >= 0)
		return;		/* no write lock at present */
	holder_cpu = lock_value & 0xffff;
	BUG_ON(holder_cpu >= NR_CPUS);
	yield_count = lppaca_of(holder_cpu).yield_count;
	if ((yield_count & 1) == 0)
		return;		/* virtual cpu is currently running */
	rmb();
	if (rw->lock != lock_value)
		return;		/* something has changed */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
			((u64)holder_cpu << 32) | yield_count);
#ifdef CONFIG_PPC_SPLPAR
	else
		plpar_hcall_norets(H_CONFER,
			get_hard_smp_processor_id(holder_cpu), yield_count);
#endif
}
#endif
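
/*
 * Spin until the lock is seen free.  HMT_low() drops our SMT thread
 * priority while we poll so a sibling hardware thread gets more of
 * the core, and on shared-processor LPARs we also confer cycles to
 * the lock holder; HMT_medium() restores normal priority once the
 * lock is observed free.
 */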
void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (lock->slock) {
		HMT_low();
		if (SHARED_PROCESSOR)
			__spin_yield(lock);
	}
	HMT_medium();
}
EXPORT_SYMBOL(arch_spin_unlock_wait);
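
/*
 * For reference, the lock slowpaths that drive these helpers live in
 * asm/spinlock.h.  Roughly (an illustrative sketch, not the verbatim
 * implementation):
 *
 *	while (__arch_spin_trylock(lock) != 0) {
 *		do {
 *			HMT_low();
 *			if (SHARED_PROCESSOR)
 *				__spin_yield(lock);
 *		} while (lock->slock != 0);
 *		HMT_medium();
 *	}
 */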