/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
# ifndef _ASM_SPINLOCK_H
# define _ASM_SPINLOCK_H
2008-08-28 15:17:49 +01:00
# include <linux/compiler.h>
2006-10-31 03:45:07 +00:00
# include <asm/barrier.h>
2014-11-15 22:08:48 +00:00
# include <asm/compiler.h>
2005-04-16 15:20:36 -07:00
# include <asm/war.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 */
2009-12-02 20:01:25 +01:00
static inline int arch_spin_is_locked ( arch_spinlock_t * lock )
2008-08-28 15:17:49 +01:00
{
2010-02-04 11:31:49 -08:00
u32 counters = ACCESS_ONCE ( lock - > lock ) ;
2008-08-28 15:17:49 +01:00
2010-02-04 11:31:49 -08:00
return ( ( counters > > 16 ) ^ counters ) & 0xffff ;
2008-08-28 15:17:49 +01:00
}
/* MIPS needs no special flags handling, so the _flags variant is plain lock. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (politely, via cpu_relax) until the lock is observed free. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
2008-08-28 15:17:49 +01:00
2009-12-02 20:01:25 +01:00
static inline int arch_spin_is_contended ( arch_spinlock_t * lock )
2008-08-28 15:17:49 +01:00
{
2010-02-04 11:31:49 -08:00
u32 counters = ACCESS_ONCE ( lock - > lock ) ;
2008-08-28 15:17:49 +01:00
2010-02-04 11:31:49 -08:00
return ( ( ( counters > > 16 ) - counters ) & 0xffff ) > 1 ;
2008-08-28 15:17:49 +01:00
}
2009-12-02 20:01:25 +01:00
# define arch_spin_is_contended arch_spin_is_contended
2008-08-28 15:17:49 +01:00
2009-12-02 20:01:25 +01:00
static inline void arch_spin_lock ( arch_spinlock_t * lock )
2005-04-16 15:20:36 -07:00
{
2008-08-28 15:17:49 +01:00
int my_ticket ;
int tmp ;
2010-02-04 11:31:49 -08:00
int inc = 0x10000 ;
2005-04-16 15:20:36 -07:00
if ( R10000_LLSC_WAR ) {
2008-08-28 15:17:49 +01:00
__asm__ __volatile__ (
2009-12-02 20:01:25 +01:00
" .set push # arch_spin_lock \n "
2008-08-28 15:17:49 +01:00
" .set noreorder \n "
" \n "
" 1: ll %[ticket], %[ticket_ptr] \n "
2010-02-04 11:31:49 -08:00
" addu %[my_ticket], %[ticket], %[inc] \n "
2008-08-28 15:17:49 +01:00
" sc %[my_ticket], %[ticket_ptr] \n "
" beqzl %[my_ticket], 1b \n "
2005-04-16 15:20:36 -07:00
" nop \n "
2010-02-04 11:31:49 -08:00
" srl %[my_ticket], %[ticket], 16 \n "
" andi %[ticket], %[ticket], 0xffff \n "
2008-08-28 15:17:49 +01:00
" bne %[ticket], %[my_ticket], 4f \n "
" subu %[ticket], %[my_ticket], %[ticket] \n "
" 2: \n "
" .subsection 2 \n "
2010-02-04 11:31:49 -08:00
" 4: andi %[ticket], %[ticket], 0xffff \n "
2009-03-27 10:07:02 -07:00
" sll %[ticket], 5 \n "
2008-08-28 15:17:49 +01:00
" \n "
" 6: bnez %[ticket], 6b \n "
" subu %[ticket], 1 \n "
" \n "
2010-02-04 11:31:49 -08:00
" lhu %[ticket], %[serving_now_ptr] \n "
2008-08-28 15:17:49 +01:00
" beq %[ticket], %[my_ticket], 2b \n "
" subu %[ticket], %[my_ticket], %[ticket] \n "
2009-03-27 10:07:02 -07:00
" b 4b \n "
2008-08-28 15:17:49 +01:00
" subu %[ticket], %[ticket], 1 \n "
" .previous \n "
" .set pop \n "
2015-01-26 12:44:11 +00:00
: [ ticket_ptr ] " + " GCC_OFF_SMALL_ASM ( ) ( lock - > lock ) ,
2010-02-04 11:31:49 -08:00
[ serving_now_ptr ] " +m " ( lock - > h . serving_now ) ,
2008-08-28 15:17:49 +01:00
[ ticket ] " =&r " ( tmp ) ,
2010-02-04 11:31:49 -08:00
[ my_ticket ] " =&r " ( my_ticket )
: [ inc ] " r " ( inc ) ) ;
2005-04-16 15:20:36 -07:00
} else {
2008-08-28 15:17:49 +01:00
__asm__ __volatile__ (
2009-12-02 20:01:25 +01:00
" .set push # arch_spin_lock \n "
2008-08-28 15:17:49 +01:00
" .set noreorder \n "
" \n "
2010-02-04 11:31:49 -08:00
" 1: ll %[ticket], %[ticket_ptr] \n "
" addu %[my_ticket], %[ticket], %[inc] \n "
2008-08-28 15:17:49 +01:00
" sc %[my_ticket], %[ticket_ptr] \n "
2010-02-04 11:31:49 -08:00
" beqz %[my_ticket], 1b \n "
" srl %[my_ticket], %[ticket], 16 \n "
" andi %[ticket], %[ticket], 0xffff \n "
2008-08-28 15:17:49 +01:00
" bne %[ticket], %[my_ticket], 4f \n "
" subu %[ticket], %[my_ticket], %[ticket] \n "
" 2: \n "
2006-09-28 01:45:21 +01:00
" .subsection 2 \n "
2015-04-20 10:54:34 +01:00
" 4: andi %[ticket], %[ticket], 0xffff \n "
2009-03-27 10:07:02 -07:00
" sll %[ticket], 5 \n "
2008-08-28 15:17:49 +01:00
" \n "
" 6: bnez %[ticket], 6b \n "
" subu %[ticket], 1 \n "
" \n "
2010-02-04 11:31:49 -08:00
" lhu %[ticket], %[serving_now_ptr] \n "
2008-08-28 15:17:49 +01:00
" beq %[ticket], %[my_ticket], 2b \n "
" subu %[ticket], %[my_ticket], %[ticket] \n "
2009-03-27 10:07:02 -07:00
" b 4b \n "
2008-08-28 15:17:49 +01:00
" subu %[ticket], %[ticket], 1 \n "
2006-09-28 01:45:21 +01:00
" .previous \n "
2008-08-28 15:17:49 +01:00
" .set pop \n "
2015-01-26 12:44:11 +00:00
: [ ticket_ptr ] " + " GCC_OFF_SMALL_ASM ( ) ( lock - > lock ) ,
2010-02-04 11:31:49 -08:00
[ serving_now_ptr ] " +m " ( lock - > h . serving_now ) ,
2008-08-28 15:17:49 +01:00
[ ticket ] " =&r " ( tmp ) ,
2010-02-04 11:31:49 -08:00
[ my_ticket ] " =&r " ( my_ticket )
: [ inc ] " r " ( inc ) ) ;
2005-04-16 15:20:36 -07:00
}
2006-10-31 03:45:07 +00:00
2007-07-14 13:24:05 +01:00
smp_llsc_mb ( ) ;
2005-04-16 15:20:36 -07:00
}
2009-12-02 20:01:25 +01:00
static inline void arch_spin_unlock ( arch_spinlock_t * lock )
2005-04-16 15:20:36 -07:00
{
2010-02-04 11:31:49 -08:00
unsigned int serving_now = lock - > h . serving_now + 1 ;
wmb ( ) ;
lock - > h . serving_now = ( u16 ) serving_now ;
nudge_writes ( ) ;
2005-04-16 15:20:36 -07:00
}
2009-12-02 20:01:25 +01:00
static inline unsigned int arch_spin_trylock ( arch_spinlock_t * lock )
2005-04-16 15:20:36 -07:00
{
2008-08-28 15:17:49 +01:00
int tmp , tmp2 , tmp3 ;
2010-02-04 11:31:49 -08:00
int inc = 0x10000 ;
2005-04-16 15:20:36 -07:00
if ( R10000_LLSC_WAR ) {
2008-08-28 15:17:49 +01:00
__asm__ __volatile__ (
2009-12-02 20:01:25 +01:00
" .set push # arch_spin_trylock \n "
2008-08-28 15:17:49 +01:00
" .set noreorder \n "
" \n "
" 1: ll %[ticket], %[ticket_ptr] \n "
2010-02-04 11:31:49 -08:00
" srl %[my_ticket], %[ticket], 16 \n "
" andi %[now_serving], %[ticket], 0xffff \n "
2008-08-28 15:17:49 +01:00
" bne %[my_ticket], %[now_serving], 3f \n "
2010-02-04 11:31:49 -08:00
" addu %[ticket], %[ticket], %[inc] \n "
2008-08-28 15:17:49 +01:00
" sc %[ticket], %[ticket_ptr] \n "
" beqzl %[ticket], 1b \n "
" li %[ticket], 1 \n "
" 2: \n "
" .subsection 2 \n "
" 3: b 2b \n "
" li %[ticket], 0 \n "
" .previous \n "
" .set pop \n "
2015-01-26 12:44:11 +00:00
: [ ticket_ptr ] " + " GCC_OFF_SMALL_ASM ( ) ( lock - > lock ) ,
2008-08-28 15:17:49 +01:00
[ ticket ] " =&r " ( tmp ) ,
[ my_ticket ] " =&r " ( tmp2 ) ,
2010-02-04 11:31:49 -08:00
[ now_serving ] " =&r " ( tmp3 )
: [ inc ] " r " ( inc ) ) ;
2005-04-16 15:20:36 -07:00
} else {
2008-08-28 15:17:49 +01:00
__asm__ __volatile__ (
2009-12-02 20:01:25 +01:00
" .set push # arch_spin_trylock \n "
2008-08-28 15:17:49 +01:00
" .set noreorder \n "
" \n "
2010-02-04 11:31:49 -08:00
" 1: ll %[ticket], %[ticket_ptr] \n "
" srl %[my_ticket], %[ticket], 16 \n "
" andi %[now_serving], %[ticket], 0xffff \n "
2008-08-28 15:17:49 +01:00
" bne %[my_ticket], %[now_serving], 3f \n "
2010-02-04 11:31:49 -08:00
" addu %[ticket], %[ticket], %[inc] \n "
2008-08-28 15:17:49 +01:00
" sc %[ticket], %[ticket_ptr] \n "
2010-02-04 11:31:49 -08:00
" beqz %[ticket], 1b \n "
2008-08-28 15:17:49 +01:00
" li %[ticket], 1 \n "
" 2: \n "
2006-09-28 01:45:21 +01:00
" .subsection 2 \n "
2008-08-28 15:17:49 +01:00
" 3: b 2b \n "
" li %[ticket], 0 \n "
2006-09-28 01:45:21 +01:00
" .previous \n "
2008-08-28 15:17:49 +01:00
" .set pop \n "
2015-01-26 12:44:11 +00:00
: [ ticket_ptr ] " + " GCC_OFF_SMALL_ASM ( ) ( lock - > lock ) ,
2008-08-28 15:17:49 +01:00
[ ticket ] " =&r " ( tmp ) ,
[ my_ticket ] " =&r " ( tmp2 ) ,
2010-02-04 11:31:49 -08:00
[ now_serving ] " =&r " ( tmp3 )
: [ inc ] " r " ( inc ) ) ;
2005-04-16 15:20:36 -07:00
}
2007-07-14 13:24:05 +01:00
smp_llsc_mb ( ) ;
2006-10-31 03:45:07 +00:00
2008-08-28 15:17:49 +01:00
return tmp ;
2005-04-16 15:20:36 -07:00
}
/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * The lock word holds the reader count; a writer sets the sign bit
 * (0x80000000), so a negative value means write-locked.
 */

/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(rw)	((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
2009-12-03 20:08:46 +01:00
static inline void arch_read_lock ( arch_rwlock_t * rw )
2005-04-16 15:20:36 -07:00
{
unsigned int tmp ;
if ( R10000_LLSC_WAR ) {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" .set noreorder # arch_read_lock \n "
2005-04-16 15:20:36 -07:00
" 1: ll %1, %2 \n "
" bltz %1, 1b \n "
" addu %1, 1 \n "
" sc %1, %0 \n "
" beqzl %1, 1b \n "
" nop \n "
" .set reorder \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2005-04-16 15:20:36 -07:00
: " memory " ) ;
} else {
2013-04-11 00:16:53 +02:00
do {
__asm__ __volatile__ (
" 1: ll %1, %2 # arch_read_lock \n "
" bltz %1, 1b \n "
" addu %1, 1 \n "
" 2: sc %1, %0 \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2013-04-11 00:16:53 +02:00
: " memory " ) ;
} while ( unlikely ( ! tmp ) ) ;
2005-04-16 15:20:36 -07:00
}
2006-10-31 03:45:07 +00:00
2007-07-14 13:24:05 +01:00
smp_llsc_mb ( ) ;
2005-04-16 15:20:36 -07:00
}
2009-12-03 20:08:46 +01:00
static inline void arch_read_unlock ( arch_rwlock_t * rw )
2005-04-16 15:20:36 -07:00
{
unsigned int tmp ;
2010-01-08 17:17:43 -08:00
smp_mb__before_llsc ( ) ;
2006-10-31 03:45:07 +00:00
2005-04-16 15:20:36 -07:00
if ( R10000_LLSC_WAR ) {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" 1: ll %1, %2 # arch_read_unlock \n "
2015-03-03 18:48:48 +00:00
" addiu %1, -1 \n "
2005-04-16 15:20:36 -07:00
" sc %1, %0 \n "
" beqzl %1, 1b \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2005-04-16 15:20:36 -07:00
: " memory " ) ;
} else {
2013-04-11 00:16:53 +02:00
do {
__asm__ __volatile__ (
" 1: ll %1, %2 # arch_read_unlock \n "
2014-11-24 14:11:39 +00:00
" addiu %1, -1 \n "
2013-04-11 00:16:53 +02:00
" sc %1, %0 \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2013-04-11 00:16:53 +02:00
: " memory " ) ;
} while ( unlikely ( ! tmp ) ) ;
2005-04-16 15:20:36 -07:00
}
}
2009-12-03 20:08:46 +01:00
static inline void arch_write_lock ( arch_rwlock_t * rw )
2005-04-16 15:20:36 -07:00
{
unsigned int tmp ;
if ( R10000_LLSC_WAR ) {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" .set noreorder # arch_write_lock \n "
2005-04-16 15:20:36 -07:00
" 1: ll %1, %2 \n "
" bnez %1, 1b \n "
" lui %1, 0x8000 \n "
" sc %1, %0 \n "
" beqzl %1, 1b \n "
2006-10-31 03:45:07 +00:00
" nop \n "
2005-04-16 15:20:36 -07:00
" .set reorder \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2005-04-16 15:20:36 -07:00
: " memory " ) ;
} else {
2013-04-11 00:16:53 +02:00
do {
__asm__ __volatile__ (
" 1: ll %1, %2 # arch_write_lock \n "
" bnez %1, 1b \n "
" lui %1, 0x8000 \n "
" 2: sc %1, %0 \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2013-04-11 00:16:53 +02:00
: " memory " ) ;
} while ( unlikely ( ! tmp ) ) ;
2005-04-16 15:20:36 -07:00
}
2006-10-31 03:45:07 +00:00
2007-07-14 13:24:05 +01:00
smp_llsc_mb ( ) ;
2005-04-16 15:20:36 -07:00
}
2009-12-03 20:08:46 +01:00
static inline void arch_write_unlock ( arch_rwlock_t * rw )
2005-04-16 15:20:36 -07:00
{
2015-06-01 17:09:52 -07:00
smp_mb__before_llsc ( ) ;
2006-10-31 03:45:07 +00:00
2005-04-16 15:20:36 -07:00
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" # arch_write_unlock \n "
2005-04-16 15:20:36 -07:00
" sw $0, %0 \n "
: " =m " ( rw - > lock )
: " m " ( rw - > lock )
: " memory " ) ;
}
2009-12-03 20:08:46 +01:00
static inline int arch_read_trylock ( arch_rwlock_t * rw )
2006-08-31 14:16:06 +01:00
{
unsigned int tmp ;
int ret ;
if ( R10000_LLSC_WAR ) {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" .set noreorder # arch_read_trylock \n "
2006-08-31 14:16:06 +01:00
" li %2, 0 \n "
" 1: ll %1, %3 \n "
2007-03-05 20:50:27 -05:00
" bltz %1, 2f \n "
2006-08-31 14:16:06 +01:00
" addu %1, 1 \n "
" sc %1, %0 \n "
" .set reorder \n "
2006-10-31 03:45:07 +00:00
" beqzl %1, 1b \n "
" nop \n "
2007-07-14 13:24:05 +01:00
__WEAK_LLSC_MB
2006-08-31 14:16:06 +01:00
" li %2, 1 \n "
" 2: \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp ) , " =&r " ( ret )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2006-08-31 14:16:06 +01:00
: " memory " ) ;
} else {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" .set noreorder # arch_read_trylock \n "
2006-08-31 14:16:06 +01:00
" li %2, 0 \n "
" 1: ll %1, %3 \n "
2007-03-05 20:50:27 -05:00
" bltz %1, 2f \n "
2006-08-31 14:16:06 +01:00
" addu %1, 1 \n "
" sc %1, %0 \n "
" beqz %1, 1b \n "
2006-10-31 03:45:07 +00:00
" nop \n "
2006-08-31 14:16:06 +01:00
" .set reorder \n "
2007-07-14 13:24:05 +01:00
__WEAK_LLSC_MB
2006-08-31 14:16:06 +01:00
" li %2, 1 \n "
" 2: \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp ) , " =&r " ( ret )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2006-08-31 14:16:06 +01:00
: " memory " ) ;
}
return ret ;
}
2005-04-16 15:20:36 -07:00
2009-12-03 20:08:46 +01:00
static inline int arch_write_trylock ( arch_rwlock_t * rw )
2005-04-16 15:20:36 -07:00
{
unsigned int tmp ;
int ret ;
if ( R10000_LLSC_WAR ) {
__asm__ __volatile__ (
2009-12-03 20:08:46 +01:00
" .set noreorder # arch_write_trylock \n "
2005-04-16 15:20:36 -07:00
" li %2, 0 \n "
" 1: ll %1, %3 \n "
" bnez %1, 2f \n "
" lui %1, 0x8000 \n "
" sc %1, %0 \n "
" beqzl %1, 1b \n "
2006-10-31 03:45:07 +00:00
" nop \n "
2007-07-14 13:24:05 +01:00
__WEAK_LLSC_MB
2005-04-16 15:20:36 -07:00
" li %2, 1 \n "
" .set reorder \n "
" 2: \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp ) , " =&r " ( ret )
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2005-04-16 15:20:36 -07:00
: " memory " ) ;
} else {
2013-04-11 00:16:53 +02:00
do {
__asm__ __volatile__ (
" ll %1, %3 # arch_write_trylock \n "
" li %2, 0 \n "
" bnez %1, 2f \n "
" lui %1, 0x8000 \n "
" sc %1, %0 \n "
" li %2, 1 \n "
" 2: \n "
2015-01-26 12:44:11 +00:00
: " = " GCC_OFF_SMALL_ASM ( ) ( rw - > lock ) , " =&r " ( tmp ) ,
2014-11-15 22:08:48 +00:00
" =&r " ( ret )
2015-01-26 12:44:11 +00:00
: GCC_OFF_SMALL_ASM ( ) ( rw - > lock )
2013-04-11 00:16:53 +02:00
: " memory " ) ;
} while ( unlikely ( ! tmp ) ) ;
smp_llsc_mb ( ) ;
2005-04-16 15:20:36 -07:00
}
return ret ;
}
/* MIPS rwlocks need no special flags handling either. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Back-off hooks used by the generic lock-spinning code. */
#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()
2006-09-30 23:27:43 -07:00
2005-04-16 15:20:36 -07:00
# endif /* _ASM_SPINLOCK_H */