/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
# include <linux/types.h>
2017-02-09 15:20:25 -05:00
# include <linux/export.h>
2005-07-27 11:44:57 -07:00
# include <linux/spinlock.h>
# include <linux/init.h>
2012-03-11 11:59:26 -04:00
# include <linux/smp.h>
2005-07-27 11:44:57 -07:00
# include <asm/io.h>
2015-01-14 17:52:33 +01:00
/* Number of busy-wait iterations before yielding; -1 means "not yet set". */
int spin_retry = -1;

/*
 * Pick the default retry count at early boot unless the user already
 * set one via the spin_retry= parameter: machines with the
 * compare-and-delay facility need far fewer iterations (10 vs 1000).
 */
static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);
2005-07-27 11:44:57 -07:00
/*
 * Parse the "spin_retry=" kernel command line parameter.
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
2016-11-28 15:50:48 +01:00
static inline void compare_and_delay ( int * lock , int old )
2015-01-14 17:52:33 +01:00
{
asm ( " .insn rsy,0xeb0000000022,%0,0,%1 " : : " d " ( old ) , " Q " ( * lock ) ) ;
}
2009-12-02 20:01:25 +01:00
/*
 * Slow path of arch_spin_lock(): spin until the lock word becomes zero
 * and can be claimed with a compare-and-swap of SPINLOCK_LOCKVAL.
 * ~owner is passed to smp_yield_cpu()/arch_vcpu_is_preempted(), so the
 * lock word presumably encodes the owning CPU as its one's complement.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&lp->lock, owner);
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
2005-07-27 11:44:57 -07:00
2009-12-02 20:01:25 +01:00
/*
 * Slow path of arch_spin_lock_flags(): like arch_spin_lock_wait(), but
 * re-enables interrupts (per the caller's saved flags) while spinning and
 * disables them again around the actual lock acquisition attempt.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			/* The lock must be taken with interrupts off. */
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			/* Lost the race: spin again with interrupts on. */
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				compare_and_delay(&lp->lock, owner);
			owner = READ_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
2008-01-26 14:11:28 +01:00
2009-12-02 20:01:25 +01:00
/*
 * Try to acquire the spinlock up to spin_retry times without sleeping
 * or yielding. Returns 1 on success, 0 if the lock stayed busy.
 */
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int lockval = SPINLOCK_LOCKVAL;
	int retries = spin_retry;
	int old;

	while (retries-- > 0) {
		old = READ_ONCE(lp->lock);
		if (old == 0) {
			/* Lock looks free - try to grab it. */
			if (__atomic_cmpxchg_bool(&lp->lock, 0, lockval))
				return 1;
		} else if (MACHINE_HAS_CAD) {
			compare_and_delay(&lp->lock, old);
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
2005-07-27 11:44:57 -07:00
2009-12-03 20:01:19 +01:00
/*
 * Slow path for read-locking an rwlock: wait until no writer holds the
 * lock (lock word >= 0) and then bump the reader count by one.
 * On z196+ the speculative increment done by the fast path is first
 * backed out with an atomic add of -1.
 */
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* Out of retries: yield to a preempted owner. */
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old < 0) {
			/* A writer holds the lock - keep waiting. */
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
2009-12-03 20:01:19 +01:00
/*
 * Try to read-lock the rwlock up to spin_retry times without yielding.
 * Returns 1 when the reader count was incremented, 0 otherwise.
 */
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old < 0) {
			/* Write-locked - retry. */
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
2014-09-22 16:34:38 +02:00
# ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
2016-11-28 15:50:48 +01:00
/*
 * Slow path for write-locking an rwlock (z196+ variant): "prev" is the
 * lock value observed by the fast path's interlocked OR of the write
 * bit. Sets bit 31 (0x80000000) and waits until all readers have
 * drained ((old & 0x7fffffff) == 0) and no earlier writer was pending
 * (prev >= 0).
 */
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* Out of retries: yield to a preempted owner. */
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			/* Write bit lost - set it again atomically. */
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
# else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
2009-12-03 20:01:19 +01:00
/*
 * Slow path for write-locking an rwlock (pre-z196 variant): set the
 * write bit (0x80000000) with compare-and-swap, then wait until all
 * readers have drained and no earlier writer was pending (prev >= 0).
 */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* Out of retries: yield to a preempted owner. */
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
2014-09-22 16:34:38 +02:00
# endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
2009-12-03 20:01:19 +01:00
/*
 * Try to write-lock the rwlock up to spin_retry times without yielding.
 * Succeeds only when the lock is completely free (no readers, no
 * writer). Returns 1 on success, 0 otherwise.
 */
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old) {
			/* Readers or a writer present - retry. */
			if (MACHINE_HAS_CAD)
				compare_and_delay(&rw->lock, old);
			continue;
		}
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
2014-09-19 14:29:31 +02:00
2016-11-28 15:50:48 +01:00
void arch_lock_relax ( int cpu )
2014-09-19 14:29:31 +02:00
{
if ( ! cpu )
return ;
2016-11-02 05:08:32 -04:00
if ( MACHINE_IS_LPAR & & ! arch_vcpu_is_preempted ( ~ cpu ) )
2014-09-19 14:29:31 +02:00
return ;
smp_yield_cpu ( ~ cpu ) ;
}
EXPORT_SYMBOL ( arch_lock_relax ) ;