#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>
#include <asm/alternative.h>

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LOCK_PREFIX
		"btsl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

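/*
 * Usage sketch (illustrative; the bitmap name is hypothetical): because
 * @nr may span multiple words, a single multi-word bitmap can be updated
 * atomically from several CPUs without extra locking:
 *
 *	static unsigned long present_map[2];	- 64 bits on i386
 *
 *	set_bit(5, present_map);	- word 0, bit 5
 *	set_bit(37, present_map);	- word 1, bit 5
 */
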
/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	__asm__(
		"btsl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LOCK_PREFIX
		"btrl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

/*
 * clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and implies release semantics before the memory
 * operation. It can be used for an unlock.
 */
static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	clear_bit(nr, addr);
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btrl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * __clear_bit() is non-atomic and implies release semantics before the memory
 * operation. It can be used for an unlock if no other CPUs can concurrently
 * modify other bits in the word.
 *
 * No memory barrier is required here, because x86 cannot reorder stores past
 * older loads. Same principle as spin_unlock.
 */
static inline void __clear_bit_unlock(unsigned long nr, volatile unsigned long *addr)
{
	barrier();
	__clear_bit(nr, addr);
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

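/*
 * Barrier usage sketch (illustrative; FLAG_BUSY and word are hypothetical):
 * when clear_bit() publishes a state change that another CPU polls with
 * test_bit(), order the preceding stores before the clear:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(FLAG_BUSY, &word);
 */
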
/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered on x86; it may be
 * reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__asm__ __volatile__(LOCK_PREFIX
		"btcl %1,%0"
		: "+m" (ADDR)
		: "Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it may be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr) : "memory");
	return oldbit;
}

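/*
 * Usage sketch (illustrative; names are hypothetical): claim a one-shot
 * initialisation so that exactly one racing caller proceeds:
 *
 *	static unsigned long init_done;
 *
 *	if (!test_and_set_bit(0, &init_done))
 *		do_one_time_setup();	- only the first winner gets here
 */
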
/**
 * test_and_set_bit_lock - Set a bit and return its old value for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This is the same as test_and_set_bit on x86.
 */
static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
{
	return test_and_set_bit(nr, addr);
}

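/*
 * Lock pairing sketch (illustrative; BIT_LOCK and word are hypothetical):
 * a single bit used as a spinning lock, acquired with
 * test_and_set_bit_lock() and released with clear_bit_unlock() so the
 * critical section gets the required acquire/release ordering:
 *
 *	while (test_and_set_bit_lock(BIT_LOCK, &word))
 *		cpu_relax();	- spin until the old value was 0
 *	... critical section ...
 *	clear_bit_unlock(BIT_LOCK, &word);
 */
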
/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered on x86; it can be
 * reordered on architectures other than x86.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr));
	return oldbit;
}

/* WARNING: non-atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit), "+m" (ADDR)
		: "Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void *addr);
#endif

static __always_inline int constant_test_bit(int nr, const volatile unsigned long *addr)
{
	return ((1UL << (nr & 31)) & (addr[nr >> 5])) != 0;
}

static inline int variable_test_bit(int nr, const volatile unsigned long *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		: "=r" (oldbit)
		: "m" (ADDR), "Ir" (nr));
	return oldbit;
}

#define test_bit(nr, addr) \
	(__builtin_constant_p(nr) ? \
	 constant_test_bit((nr), (addr)) : \
	 variable_test_bit((nr), (addr)))

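/*
 * Worked example (illustrative): for test_bit(37, addr) the constant
 * variant reads addr[37 >> 5] = addr[1] and masks with
 * 1UL << (37 & 31) = 1UL << 5, so bit 37 is bit 5 of the second word.
 * With a compile-time-constant nr the compiler can fold the whole test
 * to a single AND; a variable nr falls back to the btl-based version.
 */
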
#undef ADDR

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		: "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		: "1" ((size + 31) >> 5), "2" (addr), "b" (addr)
		: "memory");
	return res;
}

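/*
 * Allocation sketch (illustrative; map and NSLOTS are hypothetical):
 * find a free slot in a bitmap and claim it atomically, retrying if
 * another CPU raced to the same bit:
 *
 *	int slot;
 *	do {
 *		slot = find_first_zero_bit(map, NSLOTS);
 *		if (slot >= NSLOTS)
 *			return -ENOSPC;
 *	} while (test_and_set_bit(slot, map));
 */
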
/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_zero_bit(const unsigned long *addr, int size, int offset);

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("bsfl %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

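/*
 * Example (illustrative): __ffs(0x60) == 5, since bit 5 is the lowest
 * set bit of 0x60. __ffs(0) is undefined, so callers must check for a
 * nonzero word first, as find_first_bit() below does.
 */
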
/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
{
	unsigned x = 0;

	while (x < size) {
		unsigned long val = *addr++;
		if (val)
			return __ffs(val) + x;
		x += sizeof(*addr) << 3;
	}
	return x;
}

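/*
 * Iteration sketch (illustrative; map, NBITS and handle() are
 * hypothetical): visit every set bit in a bitmap by chaining
 * find_first_bit() with find_next_bit(), declared just below:
 *
 *	for (bit = find_first_bit(map, NBITS);
 *	     bit < NBITS;
 *	     bit = find_next_bit(map, NBITS, bit + 1))
 *		handle(bit);
 */
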
/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
int find_next_bit(const unsigned long *addr, int size, int offset);

/**
 * ffz - find first zero in word
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		: "=r" (word)
		: "r" (~word));
	return word;
}

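/*
 * Example (illustrative): ffz(0x0000ffff) == 16, the lowest clear bit.
 * ffz(~0UL) is undefined, so check a word against ~0UL before calling,
 * e.g. when scanning fully-allocated words for a free entry.
 */
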
#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz() (man ffs).
 */
static inline int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r + 1;
}

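/*
 * Example (illustrative): ffs(0) == 0 and ffs(0x20) == 6, matching the
 * libc convention of 1-based bit numbers, whereas __ffs(0x20) == 5.
 */
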
/**
 * fls - find last bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs().
 */
static inline int fls(int x)
{
	int r;

	__asm__("bsrl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r + 1;
}

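/*
 * Example (illustrative): fls(0) == 0, fls(1) == 1, fls(0x90) == 8,
 * so for x > 0, fls(x) - 1 is the index of the highest set bit,
 * i.e. the integer log2 of x.
 */
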
#include <asm-generic/bitops/hweight.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls64.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

#define ext2_set_bit_atomic(lock, nr, addr) \
	test_and_set_bit((nr), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr) \
	test_and_clear_bit((nr), (unsigned long *)addr)

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */