#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * *_bit() variants want use of volatile.
 * __*_bit() variants are "relaxed" and don't use the spinlock or volatile;
 * a short usage sketch follows __set_bit() below.
 */

static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m |= 1UL << CHOP_SHIFTCOUNT(nr);
}
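
/*
 * A minimal usage sketch of the two flavours above, assuming a hypothetical
 * 256-bit map called example_bitmap; kept under #if 0 so it is never built.
 * For nr = 70 on a 64-bit kernel, nr >> SHIFT_PER_LONG selects word 1 and
 * CHOP_SHIFTCOUNT(70) = 6 selects the bit within that word.
 */
#if 0
static unsigned long example_bitmap[256 / BITS_PER_LONG];

static void example_set_bits(void)
{
	/* Atomic: goes through _atomic_spin_lock_irqsave(), so it is safe
	 * against concurrent updates of any bit in the same word. */
	set_bit(70, example_bitmap);

	/* Relaxed: no locking; only valid when the caller already serializes
	 * access to the word (own lock held, or map not yet visible). */
	__set_bit(71, example_bitmap);
}
#endif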

static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m &= ~(1UL << CHOP_SHIFTCOUNT(nr));
}

static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr)
{
	unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG);

	*m ^= 1UL << CHOP_SHIFTCOUNT(nr);
}

static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);

	oldbit = *addr;
	*addr = oldbit | mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit & ~mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG);
	unsigned long oldbit;

	oldbit = *addr;
	*addr = oldbit ^ mask;

	return (oldbit & mask) ? 1 : 0;
}

static __inline__ int test_bit(int nr, const volatile unsigned long *address)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG);

	return !!(*addr & mask);
}
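
/*
 * Minimal sketch of the test_and_* return convention: the old value of the
 * bit comes back, so a bit can be claimed atomically.  The function and
 * array names below are hypothetical and the block is never compiled.
 */
#if 0
static unsigned long example_slots[1];		/* BITS_PER_LONG slots */

/* Returns nr if the slot was free and is now ours, -1 if already taken. */
static int example_claim_slot(int nr)
{
	if (test_and_set_bit(nr, example_slots))
		return -1;	/* old bit was 1: someone else owns it */
	return nr;		/* old bit was 0: we just claimed it */
}
#endif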

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is it sets PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2nd or 3rd insn will get executed.
 * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */
static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	__asm__(
#ifdef __LP64__
		" ldi		63,%1\n"
		" extrd,u,*<>	%0,63,32,%%r0\n"
		" extrd,u,*TR	%0,31,32,%0\n"	/* move top 32-bits down */
		" addi		-32,%1,%1\n"
#else
		" ldi		31,%1\n"
#endif
		" extru,<>	%0,31,16,%%r0\n"
		" extru,TR	%0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi		-16,%1,%1\n"
		" extru,<>	%0,31,8,%%r0\n"
		" extru,TR	%0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi		-8,%1,%1\n"
		" extru,<>	%0,31,4,%%r0\n"
		" extru,TR	%0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi		-4,%1,%1\n"
		" extru,<>	%0,31,2,%%r0\n"
		" extru,TR	%0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi		-2,%1,%1\n"
		" extru,=	%0,31,1,%%r0\n"	/* check last bit */
		" addi		-1,%1,%1\n"
		: "+r" (x), "=r" (ret) );
	return ret;
}
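
/*
 * What the asm above computes, as a rough C sketch of the 32-bit path only
 * (the 64-bit path just adds one more halving step).  Each stage either keeps
 * the low half (a set bit lives there, so the count drops) or shifts the high
 * half down.  The name __ffs_c_sketch is hypothetical; the block is not built.
 */
#if 0
static unsigned long __ffs_c_sketch(unsigned long x)
{
	unsigned long ret = 31;

	if (x & 0xffff) ret -= 16; else x >>= 16;
	if (x & 0x00ff) ret -=  8; else x >>=  8;
	if (x & 0x000f) ret -=  4; else x >>=  4;
	if (x & 0x0003) ret -=  2; else x >>=  2;
	if (x & 0x0001) ret -=  1;

	return ret;	/* e.g. __ffs_c_sketch(0x10) == 4 */
}
#endif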

/* Undefined if no bit is zero. */
#define ffz(x)	__ffs(~(x))

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static __inline__ int fls(int x)
{
	int ret;

	if (!x)
		return 0;

	__asm__(
		" ldi		1,%1\n"
		" extru,<>	%0,15,16,%%r0\n"
		" zdep,TR	%0,15,16,%0\n"	/* xxxx0000 */
		" addi		16,%1,%1\n"
		" extru,<>	%0,7,8,%%r0\n"
		" zdep,TR	%0,23,24,%0\n"	/* xx000000 */
		" addi		8,%1,%1\n"
		" extru,<>	%0,3,4,%%r0\n"
		" zdep,TR	%0,27,28,%0\n"	/* x0000000 */
		" addi		4,%1,%1\n"
		" extru,<>	%0,1,2,%%r0\n"
		" zdep,TR	%0,29,30,%0\n"	/* y0000000 (y&3 = 0) */
		" addi		2,%1,%1\n"
		" extru,=	%0,0,1,%%r0\n"
		" addi		1,%1,%1\n"	/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );
	return ret;
}
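
/*
 * How the lookups above relate on a sample value; a worked example only, the
 * function name is hypothetical and the block is never compiled.
 */
#if 0
static void example_bit_search(void)
{
	unsigned long v = 0x00000450UL;		/* bits 4, 6 and 10 set */

	/* __ffs(v) == 4  : lowest set bit, 0-based		*/
	/* ffs(v)   == 5  : same bit, 1-based			*/
	/* fls(v)   == 11 : highest set bit, 1-based		*/
	/* ffz(v)   == 0  : lowest clear bit (bit 0 of v)	*/
	/* ffs(0) == 0 and fls(0) == 0, but __ffs(0) is undefined */
}
#endif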

#define fls64(x)   generic_fls64(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of a N-bit word
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
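
/* Worked values: hweight32(0x0000F00FUL) == 8, hweight8(0xA5) == 4. */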

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef __LP64__
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#else
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (b[3])
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
}
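
/*
 * Sketch of the layout the comment above refers to, for the 32-bit case: the
 * 2.6 scheduler keeps MAX_PRIO+1 = 141 priority bits in 5 words and
 * permanently sets the delimiter bit 140, which is what guarantees the search
 * finds something.  Names are hypothetical; the block is never compiled.
 */
#if 0
static void example_sched_bitmap(void)
{
	unsigned long prio_bitmap[5] = { 0 };	/* 5 x 32 = 160 bits */

	__set_bit(140, prio_bitmap);	/* delimiter: always set */
	__set_bit(63, prio_bitmap);	/* some runnable priority */

	/* returns 63; with only the delimiter set it would return 140 */
	sched_find_first_bit(prio_bitmap);
}
#endif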

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1);
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)
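
/*
 * Minimal scan sketch: the search helpers return the matching bit index, or
 * 'size' when nothing (more) is found, hence the 'bit < nbits' loop test.
 * Names are hypothetical and the block is never compiled.
 */
#if 0
static void example_scan(void)
{
	unsigned long map[2] = { 0 };
	const unsigned long nbits = 2 * BITS_PER_LONG;
	unsigned long bit;

	__set_bit(3, map);
	__set_bit(BITS_PER_LONG + 1, map);

	/* visits bit 3, then bit BITS_PER_LONG + 1, then falls out */
	for (bit = find_first_bit(map, nbits);
	     bit < nbits;
	     bit = find_next_bit(map, nbits, bit + 1))
		;

	find_first_zero_bit(map, nbits);	/* == 0 here */
}
#endif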

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 */

/* '3' is log2(bits per byte): the shift turns a byte offset into a bit offset */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

#define ext2_test_bit(nr, addr) \
			test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_set_bit(nr, addr) \
		__test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit(nr, addr) \
		__test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#define ext2_set_bit_atomic(l,nr,addr) \
		test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
		test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#endif	/* __KERNEL__ */
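
/*
 * Why the XOR above works, as a sketch: ext2 bitmaps are little-endian (bit n
 * is bit n%8 of byte n/8), while bit 0 of an unsigned long on this big-endian
 * machine lives in the last byte of the word.  XORing the bit number with
 * LE_BYTE_ADDR mirrors the byte index inside the word and leaves the
 * bit-within-byte alone.  Hypothetical names; the block is never compiled.
 */
#if 0
static void example_ext2_bitmap(void)
{
	unsigned long disk_word[1] = { 0 };	/* one word of an on-disk bitmap */

	ext2_set_bit(0, disk_word);	/* sets the LSB of byte 0 of the buffer */
	ext2_test_bit(0, disk_word);	/* == 1 */
	test_bit(0, disk_word);		/* == 0: native bit 0 is in a different byte */
}
#endif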

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

/* include/linux/byteorder does not support "unsigned long" type */
static inline unsigned long ext2_swabp(unsigned long * x)
{
#ifdef __LP64__
	return (unsigned long) __swab64p((u64 *) x);
#else
	return (unsigned long) __swab32p((u32 *) x);
#endif
}

/* include/linux/byteorder doesn't support "unsigned long" type */
static inline unsigned long ext2_swab(unsigned long y)
{
#ifdef __LP64__
	return (unsigned long) __swab64((u64) y);
#else
	return (unsigned long) __swab32((u32) y);
#endif
}

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= (BITS_PER_LONG - 1UL);
	if (offset) {
		tmp = ext2_swabp(p++);
		tmp |= (~0UL >> (BITS_PER_LONG - offset));
		if (size < BITS_PER_LONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}

	while (size & ~(BITS_PER_LONG - 1)) {
		if (~(tmp = *(p++)))
			goto found_middle_swap;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = ext2_swabp(p);
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size; /* Nope. Skip ffz */
found_middle:
	return result + ffz(tmp);

found_middle_swap:
	return result + ffz(ext2_swab(tmp));
}

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PARISC_BITOPS_H */