/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <linux/irqflags.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupt for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# set_bit	\n"
		"	or	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# set_bit	\n"
		"	or	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqz	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}
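
/*
 * Illustrative usage sketch of set_bit(); dev_flags and DEV_ONLINE are
 * hypothetical names, not defined by this header:
 *
 *	static unsigned long dev_flags;
 *	#define DEV_ONLINE	0
 *
 *	set_bit(DEV_ONLINE, &dev_flags);
 *
 * Because @nr is not restricted to one word, set_bit(BITS_PER_LONG + 3,
 * bitmap) atomically sets bit 3 of bitmap[1].
 */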

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# clear_bit	\n"
		"	and	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# clear_bit	\n"
		"	and	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqz	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}
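
/*
 * When clear_bit() is used to release a lock-like flag, pair it with
 * the barrier macros defined above.  Sketch only; LOCK_BIT and word
 * are hypothetical names:
 *
 *	smp_mb__before_clear_bit();	// make prior stores visible first
 *	clear_bit(LOCK_BIT, &word);
 */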

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# change_bit	\n"
		"	xor	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqzl	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3			\n"
		"1:	" __LL "%0, %1	# change_bit	\n"
		"	xor	%0, %2			\n"
		"	" __SC "%0, %1			\n"
		"	beqz	%0, 1b			\n"
		"	.set	mips0			\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
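
/*
 * Sketch of a simple lock-like flag built on test_and_set_bit();
 * BUSY and state are hypothetical names, not part of this header:
 *
 *	while (test_and_set_bit(BUSY, &state))
 *		cpu_relax();		// old value was 1: someone else holds it
 *	// ... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY, &state);
 */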

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	mips0				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noreorder			\n"
		"	.set	mips3				\n"
		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "\t%2, %1			\n"
		"	beqz	%2, 1b				\n"
		"	 and	%2, %0, %3			\n"
#ifdef CONFIG_SMP
		"	sync					\n"
#endif
		"	.set	pop				\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}
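
/*
 * Example: test_and_change_bit() both toggles a bit and reports the
 * old value; word here is a hypothetical flags word:
 *
 *	if (test_and_change_bit(0, &word))
 *		;	// bit 0 was set, is now clear
 *	else
 *		;	// bit 0 was clear, is now set
 */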

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push		\n"
		"	.set	mips32		\n"
		"	clz	%0, %1		\n"
		"	.set	pop		\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push		\n"
	"	.set	mips64		\n"
	"	dclz	%0, %1		\n"
	"	.set	pop		\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
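
/*
 * Example values: __ilog2(1) == 0, __ilog2(8) == 3 and
 * __ilog2(0x80000000UL) == 31.  For x == 0 the clz/dclz result equals
 * the word size, so __ilog2(0) yields -1 as documented above.
 */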

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
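
/*
 * word & -word isolates the lowest set bit in two's complement, so
 * for example __ffs(0x28) reduces to __ilog2(0x8) == 3.
 */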

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
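
/*
 * Contrast with __ffs() above: ffs() is 1-based and defined for zero,
 * e.g. ffs(0) == 0, ffs(1) == 1 and ffs(0x28) == 4, while
 * __ffs(0x28) == 3.
 */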

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */