/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 99, 2000, 06, 07  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll "
#define __SC		"sc "
#define __INS		"ins "
#define __EXT		"ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld "
#define __SC		"scd "
#define __INS		"dins "
#define __EXT		"dext "
#endif
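
/*
 * Illustrative note (not from the original source): a bit number @nr is
 * split into a word index (nr >> SZLONG_LOG) and a bit offset within that
 * word (nr & SZLONG_MASK).  For example, on a 64-bit kernel (SZLONG_LOG == 6,
 * SZLONG_MASK == 63UL) nr = 70 addresses bit 6 of addr[1]; on a 32-bit
 * kernel it addresses bit 6 of addr[2].
 */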

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
#define smp_mb__after_clear_bit()	smp_llsc_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << bit), "m" (*m));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# set_bit	\n"
			"	" __INS "%0, %3, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit), "r" (~0));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# set_bit	\n"
			"	or	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a |= mask;
		raw_local_irq_restore(flags);
	}
}
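
/*
 * Usage sketch (illustrative only; device_flags and the bit number are
 * hypothetical, not part of this header):
 *
 *	static unsigned long device_flags;
 *	set_bit(3, &device_flags);	// atomically set bit 3 of device_flags
 */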

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long temp;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (~(1UL << bit)));
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# clear_bit	\n"
			"	" __INS "%0, $0, %2, 1		\n"
			"	" __SC "%0, %1			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (bit));
		} while (unlikely(!temp));
#endif /* CONFIG_CPU_MIPSR2 */
	} else if (kernel_uses_llsc) {
		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# clear_bit	\n"
			"	and	%0, %2			\n"
			"	" __SC "%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (~(1UL << bit)));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}
}
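
/*
 * Ordering sketch (illustrative, not from the original source): when the
 * cleared bit must be visible before a subsequent wakeup, pair clear_bit()
 * with the barrier macros defined earlier; BUSY_BIT and state are
 * hypothetical names:
 *
 *	clear_bit(BUSY_BIT, &state);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&state, BUSY_BIT);
 */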

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation.  It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb__before_clear_bit();
	clear_bit(nr, addr);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*m)
		: "ir" (1UL << bit));
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3			\n"
			"	" __LL "%0, %1	# change_bit	\n"
			"	xor	%0, %2			\n"
			"	" __SC	"%0, %1			\n"
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (*m)
			: "ir" (1UL << bit));
		} while (unlikely(!temp));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}
}
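
/*
 * Usage sketch (illustrative; LED_BIT and led_state are hypothetical):
 *
 *	change_bit(LED_BIT, &led_state);	// atomically toggle the bit
 */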

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
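
/*
 * Usage sketch (illustrative; CLAIMED_BIT and resource_flags are
 * hypothetical): claim a resource exactly once across CPUs:
 *
 *	if (!test_and_set_bit(CLAIMED_BIT, &resource_flags)) {
 *		// old value was 0: this caller won the race and does the setup
 *	}
 */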

/*
 * test_and_set_bit_lock - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and implies acquire ordering semantics
 * after the memory operation.
 */
static inline int test_and_set_bit_lock(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_set_bit	\n"
			"	or	%2, %0, %3			\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a |= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
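
/*
 * Bit-lock sketch (illustrative; LOCK_BIT and word are hypothetical):
 * test_and_set_bit_lock() acquires and clear_bit_unlock() releases:
 *
 *	while (test_and_set_bit_lock(LOCK_BIT, &word))
 *		cpu_relax();		// spin until the old value was 0
 *	... critical section ...
 *	clear_bit_unlock(LOCK_BIT, &word);
 */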

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
#ifdef CONFIG_CPU_MIPSR2
	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	" __EXT "%2, %0, %3, 1			\n"
			"	" __INS "%0, $0, %3, 1			\n"
			"	" __SC "%0, %1				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "ir" (bit)
			: "memory");
		} while (unlikely(!temp));
#endif
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_clear_bit	\n"
			"	or	%2, %0, %3			\n"
			"	xor	%2, %3				\n"
			"	" __SC	"%2, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a &= ~mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
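
/*
 * Usage sketch (illustrative; all names are hypothetical): consume a
 * pending-event flag:
 *
 *	if (test_and_clear_bit(EVENT_PENDING_BIT, &pending))
 *		handle_event();		// the flag was set and is now cleared
 */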

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	unsigned short bit = nr & SZLONG_MASK;
	unsigned long res;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (*m), "=&r" (res)
		: "r" (1UL << bit)
		: "memory");
	} else if (kernel_uses_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		do {
			__asm__ __volatile__(
			"	.set	mips3				\n"
			"	" __LL "%0, %1	# test_and_change_bit	\n"
			"	xor	%2, %0, %3			\n"
			"	" __SC	"\t%2, %1			\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (*m), "=&r" (res)
			: "r" (1UL << bit)
			: "memory");
		} while (unlikely(!res));

		res = temp & (1UL << bit);
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << bit;
		raw_local_irq_save(flags);
		res = (mask & *a);
		*a ^= mask;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return res != 0;
}
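
/*
 * Usage sketch (illustrative; POLARITY_BIT and ctrl_flags are hypothetical):
 * flip a bit and learn what it was beforehand:
 *
 *	int was_set = test_and_change_bit(POLARITY_BIT, &ctrl_flags);
 */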
# include <asm-generic/bitops/non-atomic.h>
2005-04-17 02:20:36 +04:00
2007-10-18 14:06:53 +04:00

#include <asm-generic/bitops/non-atomic.h>

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation.  It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	smp_mb();
	__clear_bit(nr, addr);
}

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline unsigned long __fls(unsigned long word)
{
	int num;

	if (BITS_PER_LONG == 32 &&
	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 31 - num;
	}

	if (BITS_PER_LONG == 64 &&
	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
		__asm__(
		"	.set	push					\n"
		"	.set	mips64					\n"
		"	dclz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (num)
		: "r" (word));

		return 63 - num;
	}

	num = BITS_PER_LONG - 1;

#if BITS_PER_LONG == 64
	if (!(word & (~0ul << 32))) {
		num -= 32;
		word <<= 32;
	}
#endif
	if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
		num -= 16;
		word <<= 16;
	}
	if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
		num -= 8;
		word <<= 8;
	}
	if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
		num -= 4;
		word <<= 4;
	}
	if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
		num -= 2;
		word <<= 2;
	}
	if (!(word & (~0ul << (BITS_PER_LONG - 1))))
		num -= 1;

	return num;
}
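
/*
 * Worked example (illustrative): __fls(0x1) == 0, __fls(0x80) == 7 and,
 * on a 64-bit kernel, __fls(1UL << 63) == 63.
 */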

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __fls(word & -word);
}
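
/*
 * Illustrative note (not from the original source): (word & -word) isolates
 * the lowest set bit, so feeding it to __fls() yields that bit's index.
 * E.g. word = 0b101100: word & -word == 0b100, and __fls(0b100) == 2.
 */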

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int x)
{
	int r;

	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
		__asm__("clz %0, %1" : "=r" (x) : "r" (x));

		return 32 - x;
	}

	r = 32;
	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#include <asm-generic/bitops/fls64.h>

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
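
/*
 * Worked example (illustrative): ffs(0) == 0, ffs(1) == 1, ffs(0x8) == 4.
 * Unlike __ffs(), the result is 1-based and 0 is handled explicitly.
 */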

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

#include <asm/arch_hweight.h>
#include <asm-generic/bitops/const_hweight.h>

#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */