/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);

static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}

static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}

static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}

#define switch_to(prev, next, last) do { \
	if (prev == next) \
		break; \
	save_fp_regs(&prev->thread.fp_regs); \
	restore_fp_regs(&next->thread.fp_regs); \
	save_access_regs(&prev->thread.acrs[0]); \
	restore_access_regs(&next->thread.acrs[0]); \
	prev = __switch_to(prev, next); \
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()	do { } while (0)
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

#define finish_arch_switch(prev) do { \
	set_fs(current->thread.mm_segment); \
	account_vtime(prev, current); \
} while (0)

#define nop() asm volatile("nop")

#define xchg(ptr, x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr))); \
	__ret; \
})

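/*
 * Illustrative usage sketch (not part of the original header): xchg()
 * atomically replaces the value at ptr and returns the previous contents,
 * here used to fetch and clear a flag word in one step. The names below
 * are hypothetical.
 *
 *	static unsigned int pending_mask;
 *
 *	static unsigned int consume_pending(void)
 *	{
 *		return xchg(&pending_mask, 0);
 *	}
 */
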
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,%3\n"
			"0:	cs	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) ptr)
			: "d" (x), "Q" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,%3\n"
			"0:	csg	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "Q" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
				       (unsigned long)(n), sizeof(*(ptr))))

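/*
 * Illustrative usage sketch (not part of the original header): the
 * canonical retry loop built on cmpxchg(), here a lock-free increment.
 * The counter and helper are hypothetical.
 *
 *	static int counter;
 *
 *	static void counter_inc(void)
 *	{
 *		int old;
 *
 *		do {
 *			old = counter;
 *		} while (cmpxchg(&counter, old, old + 1) != old);
 *	}
 */
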
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* Operate on the aligned word containing the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(255 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 2:
		/* Operate on the aligned word containing the halfword. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) addr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(65535 << shift)), "Q" (*(int *) addr)
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(int *) ptr)
			: "0" (old), "d" (new), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(long *) ptr)
			: "0" (old), "d" (new), "Q" (*(long *) ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed with respect to other CPUs (see 7-15 POP DJB).
 */
#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while (0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)

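/*
 * Illustrative usage sketch (not part of the original header): the
 * typical pairing for these barriers. A producer publishes data before
 * setting a flag; the consumer orders the flag read before the data
 * read. All names are hypothetical.
 *
 *	writer:				reader:
 *		shared_data = value;		while (!shared_flag)
 *		smp_wmb();				cpu_relax();
 *		shared_flag = 1;		smp_rmb();
 *						use(shared_data);
 */
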
#ifdef __s390x__

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	asm volatile( \
		"	lctlg	%1,%2,%0\n" \
		: : "Q" (*(addrtype *)(&array)), \
		    "i" (low), "i" (high)); \
})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	asm volatile( \
		"	stctg	%1,%2,%0\n" \
		: "=Q" (*(addrtype *)(&array)) \
		: "i" (low), "i" (high)); \
})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	asm volatile( \
		"	lctl	%1,%2,%0\n" \
		: : "Q" (*(addrtype *)(&array)), \
		    "i" (low), "i" (high)); \
})

#define __ctl_store(array, low, high) ({ \
	typedef struct { char _[sizeof(array)]; } addrtype; \
	asm volatile( \
		"	stctl	%1,%2,%0\n" \
		: "=Q" (*(addrtype *)(&array)) \
		: "i" (low), "i" (high)); \
})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({ \
	unsigned long __dummy; \
	__ctl_store(__dummy, cr, cr); \
	__dummy |= 1UL << (bit); \
	__ctl_load(__dummy, cr, cr); \
})

#define __ctl_clear_bit(cr, bit) ({ \
	unsigned long __dummy; \
	__ctl_store(__dummy, cr, cr); \
	__dummy &= ~(1UL << (bit)); \
	__ctl_load(__dummy, cr, cr); \
})

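/*
 * Illustrative usage sketch (not part of the original header):
 * __ctl_set_bit()/__ctl_clear_bit() perform a store/modify/load cycle
 * on one control register of the local CPU only. The control register
 * and bit number below are arbitrary examples.
 *
 *	__ctl_set_bit(0, 17);		set bit 17 of control register 0
 *	__ctl_clear_bit(0, 17);		and clear it again
 *
 * For a system-wide effect use ctl_set_bit()/ctl_clear_bit() defined
 * further down, which broadcast to all CPUs on SMP.
 */
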
#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef __s390x__
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n) \
({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

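/*
 * Illustrative usage sketch (not part of the original header):
 * cmpxchg_local() takes the same arguments as cmpxchg() but is only
 * guaranteed to be atomic with respect to the current CPU, which is
 * sufficient for per-cpu data. The names below are hypothetical.
 *
 *	unsigned long old, *slot = &__get_cpu_var(my_counter);
 *
 *	do {
 *		old = *slot;
 *	} while (cmpxchg_local(slot, old, old + 1) != old);
 */
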
/*
 * Set the PSW mask, except for the first byte, which is
 * preserved from the currently active PSW.
 */
static inline void __set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)

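/*
 * Illustrative usage sketch (not part of the original header): the two
 * macros above bracket code that must not be interrupted by a machine
 * check on this CPU.
 *
 *	local_mcck_disable();
 *	...critical code...
 *	local_mcck_enable();
 */
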
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else /* CONFIG_SMP */

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */

static inline unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b, 0b));
	return S390_lowcore.stfl_fac_list;
}

static inline int __stfle(unsigned long long *list, int doublewords)
{
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}

static inline int stfle(unsigned long long *list, int doublewords)
{
	if (!(stfl() & (1UL << 24))) /* stfle available ? */
		return -EOPNOTSUPP;
	return __stfle(list, doublewords);
}

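/*
 * Illustrative usage sketch (not part of the original header): querying
 * the extended facility list. Facility bits are numbered from the
 * leftmost bit of the first doubleword, so facility f lives in
 * list[f / 64] at 1ULL << (63 - (f % 64)); facility 42 below is an
 * arbitrary example.
 *
 *	unsigned long long fac_list[2];
 *
 *	if (stfle(fac_list, 2) > 0 &&
 *	    (fac_list[0] & (1ULL << (63 - 42))))
 *		facility 42 is installed
 */
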
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}

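/*
 * Illustrative usage sketch (not part of the original header): tprot()
 * yields the TEST PROTECTION condition code (0 means the location can
 * be both fetched and stored) or -EFAULT if the address cannot be
 * accessed at all.
 *
 *	if (tprot(addr) == 0)
 *		address is readable and writable
 */
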
#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */