/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_BITOPS_H
#define _ASM_RISCV_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error "Only <linux/bitops.h> can be included directly"
#endif /* _LINUX_BITOPS_H */

#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/bitsperlong.h>

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>

#if (BITS_PER_LONG == 64)
#define __AMO(op)	"amo" #op ".d"
#elif (BITS_PER_LONG == 32)
#define __AMO(op)	"amo" #op ".w"
#else
#error "Unexpected BITS_PER_LONG"
#endif

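/*
 * __test_and_op_bit_ord() atomically applies the AMO instruction named by
 * @op (with bitmask modifier @mod and ordering suffix @ord) to the word
 * containing bit @nr, and evaluates to the previous value of that bit.
 */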
#define __test_and_op_bit_ord(op, mod, nr, addr, ord)		\
({								\
	unsigned long __res, __mask;				\
	__mask = BIT_MASK(nr);					\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " %0, %2, %1"			\
		: "=r" (__res), "+A" (addr[BIT_WORD(nr)])	\
		: "r" (mod(__mask))				\
		: "memory");					\
	((__res & __mask) != 0);				\
})

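/*
 * __op_bit_ord() issues the same AMO, but writes the old value to the
 * zero register, so it only updates memory and returns nothing.
 */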
#define __op_bit_ord(op, mod, nr, addr, ord)			\
	__asm__ __volatile__ (					\
		__AMO(op) #ord " zero, %1, %0"			\
		: "+A" (addr[BIT_WORD(nr)])			\
		: "r" (mod(BIT_MASK(nr)))			\
		: "memory");

#define __test_and_op_bit(op, mod, nr, addr)			\
	__test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
#define __op_bit(op, mod, nr, addr)				\
	__op_bit_ord(op, mod, nr, addr, )

/* Bitmask modifiers */
#define __NOP(x)	(x)
#define __NOT(x)	(~(x))

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(or, __NOP, nr, addr);
}

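/*
 * Illustrative usage sketch (not part of this header; the names below are
 * hypothetical):
 *
 *	static unsigned long dev_flags;
 *	#define DEV_BUSY	0
 *
 *	if (!test_and_set_bit(DEV_BUSY, &dev_flags)) {
 *		... the bit was previously clear, so we now own DEV_BUSY ...
 *	}
 */
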
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation may be reordered on architectures other than x86.
 */
static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(and, __NOT, nr, addr);
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	return __test_and_op_bit(xor, __NOP, nr, addr);
}

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 *
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(or, __NOP, nr, addr);
}

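/*
 * Because @nr indexes an array of longs, these helpers also work on
 * multi-word bitmaps.  Illustrative sketch (hypothetical names):
 *
 *	static unsigned long events[4];
 *
 *	set_bit(70, events);	(bit 6 of events[1] on a 64-bit kernel)
 *	clear_bit(70, events);
 */
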
/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * Note: there are no guarantees that this function will not be reordered
 * on non-x86 architectures, so if you are writing portable code,
 * make sure not to rely on its reordering guarantees.
 */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(and, __NOT, nr, addr);
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() may be reordered on architectures other than x86.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(int nr, volatile unsigned long *addr)
{
	__op_bit(xor, __NOP, nr, addr);
}

/**
 * test_and_set_bit_lock - Set a bit and return its old value, for lock
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and provides acquire barrier semantics.
 * It can be used to implement bit locks.
 */
static inline int test_and_set_bit_lock(
	unsigned long nr, volatile unsigned long *addr)
{
	return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
}

/**
 * clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is atomic and provides release barrier semantics.
 */
static inline void clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	__op_bit_ord(and, __NOT, nr, addr, .rl);
}

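/*
 * Together, the two helpers above form a simple bit lock.  Illustrative
 * sketch (hypothetical names, ignoring how waiters should back off):
 *
 *	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_word))
 *		cpu_relax();
 *	... critical section ...
 *	clear_bit_unlock(MY_LOCK_BIT, &my_word);
 */
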
/**
 * __clear_bit_unlock - Clear a bit in memory, for unlock
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * This operation is like clear_bit_unlock(), however it is not atomic.
 * It does provide release barrier semantics, so it can be used to unlock
 * a bit lock, but it should only be used when no other CPU can modify
 * any bits in the memory until the lock is released (a good example is
 * if the bit lock itself protects access to the other bits in the word).
 *
 * On RISC-V systems there seems to be no benefit to taking advantage of the
 * non-atomic property here: it's a lot more instructions and we still have to
 * provide release semantics anyway.
 */
static inline void __clear_bit_unlock(
	unsigned long nr, volatile unsigned long *addr)
{
	clear_bit_unlock(nr, addr);
}

#undef __test_and_op_bit
#undef __op_bit
#undef __NOP
#undef __NOT
#undef __AMO

#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>

#endif /* _ASM_RISCV_BITOPS_H */