/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

#ifndef CONFIG_ARC_HAS_LLSC
#include <asm/smp.h>
#endif
#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Hardware assisted Atomic-R-M-W
 */
#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned int temp;						\
									\
	m += nr >> 5;							\
									\
	nr &= 0x1f;							\
									\
	__asm__ __volatile__(						\
	"1:	llock	%0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond	%0, [%1]	\n"				\
	"	bnz	1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
	  "ir"(nr)							\
	: "cc");							\
}
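
/*
 * Illustrative sketch (comment only, not compiled): the LLOCK/SCOND loop
 * above is the atomic equivalent of
 *
 *	do {
 *		temp = *m;			// LLOCK: load-linked
 *		temp = temp c_op (1 << nr);	// bset/bclr/bxor
 *	} while (store to *m failed);		// SCOND fails if *m changed
 *
 * i.e. an atomic read-modify-write of one word, retried on contention.
 */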
/*
 * Semantically:
 *    Test the bit
 *    if clear
 *        set it and return 0 (old value)
 *    else
 *        return 1 (old value).
 *
 * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
 * and the old value of the bit is returned
 */
2015-03-31 22:38:21 +05:30
# define TEST_N_BIT_OP(op, c_op, asm_op) \
static inline int test_and_ # # op # # _bit ( unsigned long nr , volatile unsigned long * m ) \
{ \
unsigned long old , temp ; \
\
m + = nr > > 5 ; \
\
2015-07-03 11:26:22 +05:30
nr & = 0x1f ; \
2015-03-31 22:38:21 +05:30
\
/* \
* Explicit full memory barrier needed before / after as \
* LLOCK / SCOND themselves don ' t provide any such smenatic \
*/ \
smp_mb ( ) ; \
\
__asm__ __volatile__ ( \
" 1: llock %0, [%2] \n " \
" " # asm_op " %1, %0, %3 \n " \
" scond %1, [%2] \n " \
" bnz 1b \n " \
: " =&r " ( old ) , " =&r " ( temp ) \
: " r " ( m ) , " ir " ( nr ) \
: " cc " ) ; \
\
smp_mb ( ) ; \
\
return ( old & ( 1 < < nr ) ) ! = 0 ; \
2013-01-18 15:12:16 +05:30
}
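
/*
 * Illustrative C equivalent of the generated test_and_##op##_bit()
 * (sketch only; the real body is the atomic LLOCK/SCOND loop above):
 *
 *	smp_mb();
 *	old = *m;
 *	*m = old c_op (1 << nr);	// done atomically by LLOCK/SCOND
 *	smp_mb();
 *	return (old >> nr) & 1;		// old value of the bit
 */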
#elif !defined(CONFIG_ARC_PLAT_EZNPS)
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 *
 * There's "significant" micro-optimization in writing our own variants of
 * bitops (over generic variants)
 *
 * (1) The generic APIs have "signed" @nr while we have it "unsigned"
 *     This avoids extra code to be generated for pointer arithmetic, since
 *     the compiler is "not sure" that the index is not -ve
 * (2) Utilize the fact that ARCompact bit fiddling insn (BSET/BCLR/ASL) etc
 *     only consider bottom 5 bits of @nr, so NO need to mask them off.
 *     (GCC Quirk: however for constant @nr we still need to do the masking
 *     at compile time)
 */
2015-03-31 22:38:21 +05:30
# define BIT_OP(op, c_op, asm_op) \
static inline void op # # _bit ( unsigned long nr , volatile unsigned long * m ) \
{ \
unsigned long temp , flags ; \
m + = nr > > 5 ; \
\
/* \
* spin lock / unlock provide the needed smp_mb ( ) before / after \
*/ \
bitops_lock ( flags ) ; \
\
temp = * m ; \
2015-07-03 11:26:22 +05:30
* m = temp c_op ( 1UL < < ( nr & 0x1f ) ) ; \
2015-03-31 22:38:21 +05:30
\
bitops_unlock ( flags ) ; \
}
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old, flags;					\
	m += nr >> 5;							\
									\
	bitops_lock(flags);						\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	bitops_unlock(flags);						\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}
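
/*
 * Illustrative expansion (sketch only): BIT_OP(set, |, bset) above generates
 *
 *	static inline void set_bit(unsigned long nr, volatile unsigned long *m)
 *	{
 *		...
 *		*m = temp | (1UL << (nr & 0x1f));
 *		...
 *	}
 *
 * with the RMW made atomic by irq-disable (UP) or a spinlock (SMP) via
 * bitops_lock()/bitops_unlock().
 */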
#else /* CONFIG_ARC_PLAT_EZNPS */
#define BIT_OP(op, c_op, asm_op)					\
static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	m += nr >> 5;							\
									\
	nr = (1UL << (nr & 0x1f));					\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		nr = ~nr;						\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(nr), "r"(m), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
}
#define TEST_N_BIT_OP(op, c_op, asm_op)					\
static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
									\
	m += nr >> 5;							\
									\
	nr = old = (1UL << (nr & 0x1f));				\
	if (asm_op == CTOP_INST_AAND_DI_R2_R2_R3)			\
		old = ~old;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(old)							\
	: "r"(m), "i"(asm_op)						\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return (old & nr) != 0;						\
}
#endif /* CONFIG_ARC_PLAT_EZNPS */
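
/*
 * Note on the EZNPS variants above (explanatory, inferred from the code):
 * the atomic op is a platform specific instruction emitted as a raw opcode
 * via ".word", with its fixed operand registers r2 (data) and r3 (address)
 * loaded beforehand; both are therefore listed as clobbers.
 */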
/***************************************
 * Non atomic variants
 **************************************/
2015-03-31 22:38:21 +05:30
# define __BIT_OP(op, c_op, asm_op) \
static inline void __ # # op # # _bit ( unsigned long nr , volatile unsigned long * m ) \
{ \
unsigned long temp ; \
m + = nr > > 5 ; \
\
temp = * m ; \
2015-07-03 11:26:22 +05:30
* m = temp c_op ( 1UL < < ( nr & 0x1f ) ) ; \
2015-03-31 22:38:21 +05:30
}
#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
{									\
	unsigned long old;						\
	m += nr >> 5;							\
									\
	old = *m;							\
	*m = old c_op (1UL << (nr & 0x1f));				\
									\
	return (old & (1UL << (nr & 0x1f))) != 0;			\
}
#define BIT_OPS(op, c_op, asm_op)					\
									\
	/* set_bit(), clear_bit(), change_bit() */			\
	BIT_OP(op, c_op, asm_op)					\
									\
	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
	TEST_N_BIT_OP(op, c_op, asm_op)					\
									\
	/* __set_bit(), __clear_bit(), __change_bit() */		\
	__BIT_OP(op, c_op, asm_op)					\
									\
	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
	__TEST_N_BIT_OP(op, c_op, asm_op)
#ifndef CONFIG_ARC_PLAT_EZNPS
BIT_OPS(set, |, bset)
BIT_OPS(clear, & ~, bclr)
BIT_OPS(change, ^, bxor)
#else
BIT_OPS(set, |, CTOP_INST_AOR_DI_R2_R2_R3)
BIT_OPS(clear, & ~, CTOP_INST_AAND_DI_R2_R2_R3)
BIT_OPS(change, ^, CTOP_INST_AXOR_DI_R2_R2_R3)
#endif
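
/*
 * Illustrative usage of the generated API (hypothetical helper, not part of
 * this header's interface): atomically claim a free slot in a 32-entry
 * bitmap, relying on test_and_set_bit() returning the old bit value.
 */
static inline int example_claim_slot(volatile unsigned long *bitmap)
{
	unsigned long i;

	for (i = 0; i < 32; i++)
		if (!test_and_set_bit(i, bitmap))
			return i;	/* bit was clear; slot is now ours */

	return -1;			/* all slots already taken */
}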
/*
 * This routine doesn't need to be atomic.
 */
static inline int
test_bit(unsigned int nr, const volatile unsigned long *addr)
{
	unsigned long mask;

	addr += nr >> 5;

	mask = 1UL << (nr & 0x1f);

	return ((mask & *addr) != 0);
}
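
/*
 * Example (illustrative): test_bit(37, addr) reads addr[1] (37 >> 5 == 1)
 * and tests mask 1UL << 5, i.e. bit 5 of the second word.
 */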
#ifdef CONFIG_ISA_ARCOMPACT

/*
 * Count the number of zeros, starting from MSB
 * Helper for fls() friends
 * This is a pure count, so (1-32) or (0-31) doesn't apply
 * It could be 0 to 32, based on the number of 0's in there
 * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF) = 0, clz(0) = 32, clz(1) = 31
 */
static inline __attribute__ ((const)) int clz(unsigned int x)
{
	unsigned int res;

	__asm__ __volatile__(
	"	norm.f  %0, %1		\n"
	"	mov.n   %0, 0		\n"
	"	add.p   %0, %0, 1	\n"
	: "=r"(res)
	: "r"(x)
	: "cc");

	return res;
}
static inline int constant_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
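
/*
 * Illustrative trace (comment only): constant_fls() binary-searches for the
 * top set bit, shifting it toward the MSB. For x = 0x100: the upper 16 bits
 * are clear (x <<= 16, r = 16), then the 4/2/1-bit steps shift the bit up to
 * bit 31, ending with r = 9, i.e. fls(0x100) == 9 (bit 8, 1-based).
 */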
/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned int x)
{
	if (__builtin_constant_p(x))
		return constant_fls(x);

	return 32 - clz(x);
}
/*
 * __fls: Similar to fls, but zero based (0-31)
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	if (!x)
		return 0;
	else
		return fls(x) - 1;
}
/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
#define ffs(x)	({ unsigned long __t = (x); fls(__t & -__t); })
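
/*
 * Illustrative note (comment only): __t & -__t isolates the lowest set bit
 * via two's complement, e.g. 0b101000 & -0b101000 == 0b001000, so fls() of
 * that single bit yields the 1-based position of the first set bit: here 4.
 */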
/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
	if (!word)
		return word;

	return ffs(word) - 1;
}
#else /* CONFIG_ISA_ARCV2 */
/*
 * fls = Find Last Set in word
 * @result: [1-32]
 * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
 */
static inline __attribute__ ((const)) int fls(unsigned long x)
{
	int n;

	asm volatile(
	"	fls.f	%0, %1		\n"  /* 0:31; 0(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}
/*
 * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
 */
static inline __attribute__ ((const)) int __fls(unsigned long x)
{
	/* FLS insn has exactly same semantics as the API */
	return __builtin_arc_fls(x);
}
/*
 * ffs = Find First Set in word (LSB to MSB)
 * @result: [1-32], 0 if all 0's
 */
static inline __attribute__ ((const)) int ffs(unsigned long x)
{
	int n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	add.nz	%0, %0, 1	\n"  /* 0:31 -> 1:32 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)	/* Early clobber not needed */
	: "r"(x)
	: "cc");

	return n;
}
/*
 * __ffs: Similar to ffs, but zero based (0-31)
 */
static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
	unsigned long n;

	asm volatile(
	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */
	"	mov.z	%0, 0		\n"  /* 31(Z)-> 0 */
	: "=r"(n)
	: "r"(x)
	: "cc");

	return n;
}
#endif /* CONFIG_ISA_ARCOMPACT */
/*
 * ffz = Find First Zero in word.
 * @return: [0-31], 32 if all 1's
 */
#define ffz(x)	__ffs(~(x))
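
/*
 * Illustrative example (comment only): ffz(0xFF) == __ffs(~0xFF)
 * == __ffs(0xFFFFFF00) == 8, i.e. the first clear bit is bit 8.
 */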
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* !__ASSEMBLY__ */

#endif