/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done like in the ARMv6+ architecture,
 * with a bastardized atomic decrement (it is not a reliable atomic decrement,
 * but it satisfies the defined semantics for our purpose, while being
 * smaller and faster than a real atomic decrement or atomic swap).
 * The idea is to attempt decrementing the lock value only once.  If, once
 * decremented, it isn't zero, or if its store-back fails due to a dispute
 * on the exclusive store, we simply bail out immediately through the slow
 * path, where the lock will be reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		fail_fn(count);
}
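
/*
 * Illustrative sketch only, not part of the build: a rough C-level model of
 * the single-attempt ll/sc fastpath above.  ll()/sc() are hypothetical
 * helpers standing in for movli.l (load-linked) and movco.l (store-
 * conditional, returning non-zero on success); they are not real kernel API.
 */
#if 0
static inline void sketch_fastpath_lock(atomic_t *count,
					void (*fail_fn)(atomic_t *))
{
	int val = ll(&count->counter);	/* movli.l: load with reservation */

	val -= 1;			/* optimistic decrement */

	/*
	 * Try the store-back exactly once.  If the reservation was lost
	 * (another CPU touched the word) or the new value is not zero
	 * (the mutex was already held), punt to the slow path.
	 */
	if (!sc(&count->counter, val) || val != 0)
		fail_fn(count);
}
#endif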
static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		__res = -1;

	return __res;
}
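
/*
 * Illustrative sketch only, not part of the build: how a caller might
 * combine the retval fastpath with a slow path.  my_mutex and
 * my_mutex_lock_slowpath() are hypothetical names, not the generic mutex
 * layer's actual interface.
 */
#if 0
static int my_mutex_lock(struct my_mutex *lock)
{
	/* 0 on a successful fastpath acquisition, -1 otherwise */
	if (__mutex_fastpath_lock_retval(&lock->count) == 0)
		return 0;

	/* contended or the exclusive store failed: take the slow path */
	return my_mutex_lock_slowpath(lock);
}
#endif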
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"
		"add		#1, %0	\n\t"
		"movco.l	%0, @%2	\n\t"
		"movt		%1	\n\t"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res <= 0))
		fail_fn(count);
}
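
/*
 * Illustrative sketch only, not part of the build: a rough C-level model of
 * the unlock fastpath above, again using the hypothetical ll()/sc() helpers
 * for movli.l/movco.l.  A post-increment value of 0 or less means there may
 * be waiters, so the slow path must be taken to wake them.
 */
#if 0
static inline void sketch_fastpath_unlock(atomic_t *count,
					  void (*fail_fn)(atomic_t *))
{
	int val = ll(&count->counter);	/* movli.l */

	val += 1;			/* release the lock */

	if (!sc(&count->counter, val) || val <= 0)
		fail_fn(count);		/* lost reservation or waiters queued */
}
#endif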
/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1
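
/*
 * Illustrative sketch only, not part of the build: what a slow-path unlock
 * might do with this flag.  mutex_slowpath_unlock() and the field layout are
 * hypothetical, not the actual generic mutex code.
 */
#if 0
static void mutex_slowpath_unlock(struct mutex *lock)
{
	/*
	 * Because __mutex_slowpath_needs_to_unlock() is 1 here, the slow
	 * path must still release the count itself before waking a waiter:
	 * a failed or contended fastpath unlock left the mutex locked.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	/* ... wake up a waiter ... */
}
#endif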
/*
 * For __mutex_fastpath_trylock we do an atomic decrement and check the
 * result; __res is set to 1 if the lock was acquired and 0 otherwise.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l @%2, %0	\n\t"
		"dt %0			\n\t"
		"movco.l %0,@%2		\n\t"
		"bf 1b			\n\t"
		"cmp/eq #0,%0		\n\t"
		"bt 2f			\n\t"
		"mov #0, %1		\n\t"
		"bf 3f			\n\t"
		"2: mov #1, %1		\n\t"
		"3:			"
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
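
/*
 * Illustrative sketch only, not part of the build: a C-level model of the
 * trylock above.  Unlike the lock fastpaths, the ll/sc pair is retried until
 * the store-conditional succeeds; the return value is 1 when the decrement
 * reached zero (lock acquired) and 0 otherwise.  ll()/sc() are hypothetical
 * helpers, not real kernel API.
 */
#if 0
static inline int sketch_fastpath_trylock(atomic_t *count)
{
	int val;

	do {
		val = ll(&count->counter);	/* movli.l */
		val -= 1;			/* dt: decrement and test */
	} while (!sc(&count->counter, val));	/* movco.l + bf 1b */

	return (val == 0) ? 1 : 0;		/* cmp/eq #0,%0 */
}
#endif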
#endif	/* __ASM_SH_MUTEX_LLSC_H */