/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__
struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
extern void update_per_regs(struct task_struct *task);
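
/*
 * Save the floating point register state of a task into *fpregs.
 * Machines without the IEEE floating point facility only have the
 * four basic registers 0, 2, 4 and 6 and no floating point control
 * word, so only those are saved in that case.
 */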
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}

static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}
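
/*
 * Context switch: user register state is only saved/restored for
 * tasks that own an mm; kernel threads (mm == NULL) do not use the
 * floating point or access registers from their own context, so
 * that work is skipped for them.
 */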
#define switch_to(prev, next, last) do {				\
	if (prev->mm) {							\
		save_fp_regs(&prev->thread.fp_regs);			\
		save_access_regs(&prev->thread.acrs[0]);		\
	}								\
	if (next->mm) {							\
		restore_fp_regs(&next->thread.fp_regs);			\
		restore_access_regs(&next->thread.acrs[0]);		\
		update_per_regs(next);					\
	}								\
	prev = __switch_to(prev, next);					\
} while (0)

extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);

#ifdef CONFIG_PFAULT
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */

extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);

#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
	account_vtime(prev, current);					\
} while (0)

#define nop() asm volatile("nop")

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */
#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
#define set_mb(var, value)	do { var = value; mb(); } while (0)
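
/*
 * Illustrative sketch, not part of the original header: a classic
 * producer/consumer pairing of the barriers above.  The names data,
 * ready, writer() and reader() are hypothetical.
 *
 *	static int data, ready;
 *
 *	static void writer(void)	// runs on CPU 0
 *	{
 *		data = 42;
 *		smp_wmb();	// publish data before the ready flag
 *		ready = 1;
 *	}
 *
 *	static void reader(void)	// runs on CPU 1
 *	{
 *		if (ready) {
 *			smp_rmb();	// read the flag before the data
 *			BUG_ON(data != 42);
 *		}
 *	}
 */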

#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */

#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})
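
/*
 * Illustrative sketch, not part of the original header: the helpers
 * above are single-bit read-modify-write cycles on one control
 * register.  Updating several bits with one store/load round trip
 * would look like this (the bit numbers are made up):
 *
 *	unsigned long reg;
 *
 *	__ctl_store(reg, 0, 0);		// fetch control register 0
 *	reg |= 1UL << 4;		// set a hypothetical bit
 *	reg &= ~(1UL << 5);		// clear another one
 *	__ctl_load(reg, 0, 0);		// write it back
 */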

/*
 * Use to set the psw mask except for the first byte, which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (arch_local_save_flags() & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
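
/*
 * Illustrative usage, not part of the original header: the mcck
 * helpers bracket sequences that must not be interrupted by a
 * machine check:
 *
 *	local_mcck_disable();
 *	// ... code that must not see a machine-check interrupt ...
 *	local_mcck_enable();
 */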

#ifdef CONFIG_SMP
extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)
#else
#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)
#endif /* CONFIG_SMP */

#define MAX_FACILITY_BIT (256*8)	/* stfle_fac_list has 256 bytes */

/*
 * The test_facility function uses the bit ordering where the MSB is bit 0.
 * That makes it easier to query facility bits with the bit number as
 * documented in the Principles of Operation.
 */
static inline int test_facility(unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return 0;
	ptr = (unsigned char *) &S390_lowcore.stfle_fac_list + (nr >> 3);
	return (*ptr & (0x80 >> (nr & 7))) != 0;
}
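
/*
 * Worked example (illustrative): for nr == 11 the byte index is
 * 11 >> 3 == 1 and the mask is 0x80 >> (11 & 7) == 0x10, i.e. facility
 * bit 11 is tested against the 0x10 bit of the second byte of
 * stfle_fac_list.
 */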

/* Store the CPU address of the executing CPU (STORE CPU ADDRESS). */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

extern unsigned long arch_align_stack(unsigned long sp);

/*
 * Test the protection attributes of an address (TEST PROTECTION).
 * Returns the resulting condition code (0..3) or -EFAULT if the
 * instruction itself faults.
 */
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
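
/*
 * Illustrative usage, not part of the original header: probing whether
 * an address is accessible before touching it:
 *
 *	if (tprot(addr) == -EFAULT)
 *		return;		// address not accessible, skip it
 */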

#endif /* __KERNEL__ */

#endif