#ifndef _M68KNOMMU_BITOPS_H
#define _M68KNOMMU_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bset %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bset %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __set_bit(nr, addr) set_bit(nr, addr)
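
/*
 * Worked example of the addressing used by the bit operations in this
 * file: the byte index (nr ^ 31) >> 3 selects, within the big-endian
 * 32-bit word, the byte that holds bit nr, and bset/bclr/bchg with a
 * memory operand then act on bit (nr % 8) of that byte:
 *
 *	nr = 0  ->  byte (0 ^ 31) >> 3 = 3, bit 0  (LSB of the long)
 *	nr = 8  ->  byte (8 ^ 31) >> 3 = 2, bit 0
 *	nr = 31 ->  byte (31 ^ 31) >> 3 = 0, bit 7 (MSB of the long)
 */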

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
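
/*
 * Usage sketch (example only; BUSY_BIT and dev_flags are hypothetical
 * caller-side names): callers that need ordering around clear_bit()
 * bracket it with the barrier macros above:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(BUSY_BIT, &dev_flags);
 *	smp_mb__after_clear_bit();
 */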

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bclr %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bclr %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __clear_bit(nr, addr) clear_bit(nr, addr)

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %0,%%a0; bchg %1,(%%a0)"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0", "cc");
#else
	__asm__ __volatile__ ("bchg %1,%0"
		: "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		: "cc");
#endif
}

#define __change_bit(nr, addr) change_bit(nr, addr)

static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
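
/*
 * Usage sketch (example only; TX_BUSY and port_flags are hypothetical
 * caller-side names): test_and_set_bit() returns the previous value of
 * the bit, so it is the usual way to claim a flag, with clear_bit()
 * releasing it:
 *
 *	if (!test_and_set_bit(TX_BUSY, &port_flags)) {
 *		/\* the bit was clear, we now own the flag *\/
 *		clear_bit(TX_BUSY, &port_flags);
 *	}
 */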

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0\n\tbchg %2,(%%a0)\n\tsne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bchg %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[(nr^31) >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define __test_and_change_bit(nr, addr) test_and_change_bit(nr, addr)

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))
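
/*
 * Worked example of the indexing above: for nr = 37 the word index is
 * 37 >> 5 = 1 and the mask is 1 << (37 & 31) = 1 << 5, i.e. bit 5 of
 * the second 32-bit word.  test_bit() resolves to __constant_test_bit()
 * when nr is a compile-time constant and to __test_bit() otherwise.
 */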

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>

#include <asm-generic/bitops/lock.h>

static __inline__ int ext2_set_bit(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bset %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bset %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; bclr %2,(%%a0); sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("bclr %2,%1; sne %0"
		: "=d" (retval), "+m" (((volatile char *)addr)[nr >> 3])
		: "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_set_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_set_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})

#define ext2_clear_bit_atomic(lock, nr, addr)		\
	({						\
		int ret;				\
		spin_lock(lock);			\
		ret = ext2_clear_bit((nr), (addr));	\
		spin_unlock(lock);			\
		ret;					\
	})
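
/*
 * Usage sketch (example only; "bitmap_lock", "bit" and "map" are
 * hypothetical caller-side names): the *_atomic() wrappers serialise
 * updates of a shared bitmap with a caller-provided spinlock and
 * return the previous value of the bit:
 *
 *	if (!ext2_set_bit_atomic(&bitmap_lock, bit, map))
 *		/\* bit was previously clear, we claimed it *\/ ;
 */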

static __inline__ int ext2_test_bit(int nr, const volatile void *addr)
{
	char retval;

#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__ ("lea %1,%%a0; btst %2,(%%a0); sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "d" (nr)
		: "%a0");
#else
	__asm__ __volatile__ ("btst %2,%1; sne %0"
		: "=d" (retval)
		: "m" (((const volatile char *)addr)[nr >> 3]), "di" (nr)
		/* No clobber */);
#endif

	return retval;
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal. So we could keep a big endian value
		 * in tmp, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above. But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
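
/*
 * Worked example of the mask trick above, assuming offset = 4: the
 * straightforward mask ~0UL >> (32 - 4) = 0x0000000f covers bits 0-3
 * of the little-endian bitmap, which live in the low bits of byte 0.
 * When that byte is loaded as the most significant byte of a
 * big-endian 32-bit word, those bits become bits 24-27, and
 * __swab32(0x0000000f) = 0x0f000000 marks exactly them as already
 * scanned, without having to byte-swap tmp itself.
 */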

#define ext2_find_next_bit(addr, size, off) \
	generic_find_next_le_bit((unsigned long *)(addr), (size), (off))

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _M68KNOMMU_BITOPS_H */