2005-04-16 15:20:36 -07:00
/*
* S390 version
2012-07-20 11:15:04 +02:00
* Copyright IBM Corp . 1999
2005-04-16 15:20:36 -07:00
* Author ( s ) : Hartmut Penner ( hp @ de . ibm . com ) ,
* Martin Schwidefsky ( schwidefsky @ de . ibm . com )
*
* Derived from " include/asm-i386/processor.h "
* Copyright ( C ) 1994 , Linus Torvalds
*/
# ifndef __ASM_S390_PROCESSOR_H
# define __ASM_S390_PROCESSOR_H
2015-10-06 16:23:39 +02:00
# include <linux/const.h>
2014-04-15 12:55:07 +02:00
# define CIF_MCCK_PENDING 0 /* machine check handling is pending */
# define CIF_ASCE 1 /* user asce needs fixup / uaccess */
2014-09-30 17:37:52 +02:00
# define CIF_NOHZ_DELAY 2 /* delay HZ disable for a tick */
2015-10-06 12:25:59 +02:00
# define CIF_FPU 3 /* restore FPU registers */
2015-08-15 11:42:21 +02:00
# define CIF_IGNORE_IRQ 4 /* ignore interrupt (for udelay) */
2015-11-19 11:09:45 +01:00
# define CIF_ENABLED_WAIT 5 /* in enabled wait state */
2014-04-15 12:55:07 +02:00
2015-10-06 16:23:39 +02:00
# define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
# define _CIF_ASCE _BITUL(CIF_ASCE)
# define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
# define _CIF_FPU _BITUL(CIF_FPU)
# define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
2015-11-19 11:09:45 +01:00
# define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
2014-04-15 12:55:07 +02:00
2012-09-05 13:26:11 +02:00
# ifndef __ASSEMBLY__
2008-12-25 13:39:16 +01:00
# include <linux/linkage.h>
2012-03-28 18:30:02 +01:00
# include <linux/irqflags.h>
2009-09-11 10:29:04 +02:00
# include <asm/cpu.h>
2009-04-14 15:36:16 +02:00
# include <asm/page.h>
2005-04-16 15:20:36 -07:00
# include <asm/ptrace.h>
2009-04-14 15:36:16 +02:00
# include <asm/setup.h>
2012-07-31 10:52:05 +02:00
# include <asm/runtime_instr.h>
2015-10-06 12:25:59 +02:00
# include <asm/fpu/types.h>
# include <asm/fpu/internal.h>
2005-04-16 15:20:36 -07:00
2014-04-15 12:55:07 +02:00
static inline void set_cpu_flag ( int flag )
{
2015-10-06 16:23:29 +02:00
S390_lowcore . cpu_flags | = ( 1UL < < flag ) ;
2014-04-15 12:55:07 +02:00
}
static inline void clear_cpu_flag ( int flag )
{
2015-10-06 16:23:29 +02:00
S390_lowcore . cpu_flags & = ~ ( 1UL < < flag ) ;
2014-04-15 12:55:07 +02:00
}
static inline int test_cpu_flag ( int flag )
{
2015-10-06 16:23:29 +02:00
return ! ! ( S390_lowcore . cpu_flags & ( 1UL < < flag ) ) ;
2014-04-15 12:55:07 +02:00
}
2015-11-19 11:09:45 +01:00
/*
* Test CIF flag of another CPU . The caller needs to ensure that
* CPU hotplug can not happen , e . g . by disabling preemption .
*/
static inline int test_cpu_flag_of ( int flag , int cpu )
{
2015-12-31 10:29:00 +01:00
struct lowcore * lc = lowcore_ptr [ cpu ] ;
2015-11-19 11:09:45 +01:00
return ! ! ( lc - > cpu_flags & ( 1UL < < flag ) ) ;
}
2014-09-30 17:37:52 +02:00
# define arch_needs_cpu() test_cpu_flag(CIF_NOHZ_DELAY)
2005-04-16 15:20:36 -07:00
/*
* Default implementation of macro that returns current
* instruction pointer ( " program counter " ) .
*/
2006-09-28 16:56:43 +02:00
# define current_text_addr() ({ void *pc; asm("basr %0,0" : "=a" (pc)); pc; })
2005-04-16 15:20:36 -07:00
2009-09-11 10:29:04 +02:00
static inline void get_cpu_id ( struct cpuid * ptr )
2007-02-21 10:55:18 +01:00
{
2010-02-26 22:37:31 +01:00
asm volatile ( " stidp %0 " : " =Q " ( * ptr ) ) ;
2007-02-21 10:55:18 +01:00
}
2016-04-14 12:35:22 +02:00
void s390_adjust_jiffies ( void ) ;
void s390_update_cpu_mhz ( void ) ;
void cpu_detect_mhz_feature ( void ) ;
2011-10-30 15:17:13 +01:00
extern const struct seq_operations cpuinfo_op ;
extern int sysctl_ieee_emulation_warnings ;
2012-09-06 15:48:11 -04:00
extern void execve_tail ( void ) ;
2005-04-16 15:20:36 -07:00
/*
2009-03-18 13:27:36 +01:00
* User space process size : 2 GB for 31 bit , 4 TB or 8 PT for 64 bit .
2005-04-16 15:20:36 -07:00
*/
2009-03-18 13:27:36 +01:00
# define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
2008-02-09 18:24:36 +01:00
# define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
( 1UL < < 30 ) : ( 1UL < < 41 ) )
# define TASK_SIZE TASK_SIZE_OF(current)
2013-07-26 15:04:03 +02:00
# define TASK_MAX_SIZE (1UL << 53)
2005-04-16 15:20:36 -07:00
2008-02-09 18:24:37 +01:00
# define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
# define STACK_TOP_MAX (1UL << 42)
2008-02-08 04:19:26 -08:00
2005-04-16 15:20:36 -07:00
# define HAVE_ARCH_PICK_MMAP_LAYOUT
/*
 * Per-thread user-segment state; ar4 holds the access register 4
 * value used for address-space selection (set_fs/get_fs style).
 */
typedef struct {
	__u32 ar4;
} mm_segment_t;
/*
* Thread structure
*/
struct thread_struct {
unsigned int acrs [ NUM_ACRS ] ;
unsigned long ksp ; /* kernel stack pointer */
mm_segment_t mm_segment ;
2011-07-24 10:48:20 +02:00
unsigned long gmap_addr ; /* address of last gmap fault. */
2016-03-08 12:12:18 +01:00
unsigned int gmap_write_flag ; /* gmap fault write indication */
2016-03-08 12:31:52 +01:00
unsigned int gmap_int_code ; /* int code of last gmap fault */
2013-06-17 16:25:18 +02:00
unsigned int gmap_pfault ; /* signal of a pending guest pfault */
2011-01-05 12:48:10 +01:00
struct per_regs per_user ; /* User specified PER registers */
struct per_event per_event ; /* Cause of the last PER trap */
2012-07-31 11:03:04 +02:00
unsigned long per_flags ; /* Flags to control debug behavior */
2005-04-16 15:20:36 -07:00
/* pfault_wait is used to block the process on a pfault event */
unsigned long pfault_wait ;
2011-05-23 10:24:34 +02:00
struct list_head list ;
2012-07-31 10:52:05 +02:00
/* cpu runtime instrumentation */
struct runtime_instr_cb * ri_cb ;
2012-07-31 11:03:04 +02:00
unsigned char trap_tdb [ 256 ] ; /* Transaction abort diagnose block */
2016-04-01 15:42:15 +02:00
/*
* Warning : ' fpu ' is dynamically - sized . It * MUST * be at
* the end .
*/
struct fpu fpu ; /* FP and VX register save area */
2005-04-16 15:20:36 -07:00
} ;
2013-07-02 22:58:26 +02:00
/* Flag to disable transactions. */
# define PER_FLAG_NO_TE 1UL
/* Flag to enable random transaction aborts. */
# define PER_FLAG_TE_ABORT_RAND 2UL
/* Flag to specify random transaction abort mode:
* - abort each transaction at a random instruction before TEND if set .
* - abort random transactions at a random instruction if cleared .
*/
# define PER_FLAG_TE_ABORT_RAND_TEND 4UL
2012-07-31 11:03:04 +02:00
2005-04-16 15:20:36 -07:00
typedef struct thread_struct thread_struct ;
/*
* Stack layout of a C stack frame .
*/
/*
 * Register save area layout of a C stack frame; field order depends
 * on whether the packed-stack ABI is in use.
 */
#ifndef __PACK_STACK
struct stack_frame {
	unsigned long back_chain;	/* link to caller's frame */
	unsigned long empty1[5];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned int empty2[8];
};
#else
struct stack_frame {
	unsigned long empty1[5];
	unsigned int empty2[8];
	unsigned long gprs[10];		/* saved general purpose registers */
	unsigned long back_chain;	/* link to caller's frame */
};
#endif
# define ARCH_MIN_TASKALIGN 8
2007-10-22 12:52:45 +02:00
/*
 * Initial thread_struct for the boot task: kernel stack pointer at the
 * top of init_stack, fpu register save area pointing at the static fprs.
 */
#define INIT_THREAD {							\
	.ksp = sizeof(init_stack) + (unsigned long) &init_stack,	\
	.fpu.regs = (void *) init_task.thread.fpu.fprs,			\
}
2005-04-16 15:20:36 -07:00
/*
 * Do necessary setup to start up a new thread (64-bit addressing mode).
 */
#define start_thread(regs, new_psw, new_stackp) do {			\
	regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;	\
	regs->psw.addr = new_psw;					\
	regs->gprs[15] = new_stackp;					\
	execve_tail();							\
} while (0)
2011-10-30 15:16:50 +01:00
/*
 * Do necessary setup to start up a new 31-bit (compat) thread: no
 * PSW_MASK_EA, and the page table is downgraded to 3 levels so the
 * asce matches the 31-bit address space.
 */
#define start_thread31(regs, new_psw, new_stackp) do {			\
	regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA;			\
	regs->psw.addr = new_psw;					\
	regs->gprs[15] = new_stackp;					\
	crst_table_downgrade(current->mm);				\
	execve_tail();							\
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct ;
struct mm_struct ;
2008-02-08 04:18:33 -08:00
struct seq_file ;
2005-04-16 15:20:36 -07:00
2016-10-17 11:08:31 +02:00
typedef int ( * dump_trace_func_t ) ( void * data , unsigned long address , int reliable ) ;
2016-02-09 12:58:54 +01:00
void dump_trace ( dump_trace_func_t func , void * data ,
struct task_struct * task , unsigned long sp ) ;
2015-02-12 13:08:27 +01:00
void show_cacheinfo ( struct seq_file * m ) ;
2012-08-29 14:12:20 +02:00
2005-04-16 15:20:36 -07:00
/* Free all resources held by a thread. */
extern void release_thread ( struct task_struct * ) ;
/*
* Return saved PC of a blocked thread .
*/
extern unsigned long thread_saved_pc ( struct task_struct * t ) ;
unsigned long get_wchan ( struct task_struct * p ) ;
2006-01-12 01:05:49 -08:00
# define task_pt_regs(tsk) ((struct pt_regs *) \
2006-01-12 01:05:50 -08:00
( task_stack_page ( tsk ) + THREAD_SIZE ) - 1 )
2006-01-12 01:05:49 -08:00
# define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
# define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
2005-04-16 15:20:36 -07:00
2013-10-16 09:58:01 +02:00
/* Has task runtime instrumentation enabled ? */
# define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
2016-01-31 17:06:16 +01:00
/* Return the current value of the stack pointer (general register 15). */
static inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;

	asm volatile("la %0,0(15)" : "=a" (sp));
	return sp;
}
2012-03-28 18:30:02 +01:00
/* Store the CPU address of the current CPU (STAP instruction). */
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}
2005-04-16 15:20:36 -07:00
/*
* Give up the time slice of the virtual PU .
*/
2016-10-25 11:03:11 +02:00
void cpu_relax_yield ( void ) ;
2005-04-16 15:20:36 -07:00
2016-10-25 11:03:13 +02:00
# define cpu_relax() barrier()
2013-09-28 11:23:59 +02:00
2016-04-14 12:35:22 +02:00
#define ECAG_CACHE_ATTRIBUTE	0
#define ECAG_CPU_ATTRIBUTE	1

/*
 * Extract CPU attribute (ECAG instruction, encoded via .insn because
 * older assemblers may not know the mnemonic).
 */
static inline unsigned long __ecag(unsigned int asi, unsigned char parm)
{
	unsigned long val;

	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (asi << 8 | parm));
	return val;
}
2007-06-19 13:10:06 +02:00
/* Set the PSW storage access key (SPKA instruction). */
static inline void psw_set_key(unsigned int key)
{
	asm volatile("spka 0(%0)" : : "d" (key));
}
2005-06-25 14:55:30 -07:00
/*
 * Set PSW to specified value.
 */
static inline void __load_psw(psw_t psw)
{
	asm volatile("lpswe %0" : : "Q" (psw) : "cc");
}
2005-04-16 15:20:36 -07:00
/*
* Set PSW mask to specified value , while leaving the
* PSW addr pointing to the next instruction .
*/
2015-10-12 11:54:03 +02:00
static inline void __load_psw_mask ( unsigned long mask )
2005-04-16 15:20:36 -07:00
{
unsigned long addr ;
psw_t psw ;
2005-06-25 14:55:30 -07:00
2005-04-16 15:20:36 -07:00
psw . mask = mask ;
2006-09-28 16:56:43 +02:00
asm volatile (
" larl %0,1f \n "
2010-02-26 22:37:31 +01:00
" stg %0,%O1+8(%R1) \n "
" lpswe %1 \n "
2005-04-16 15:20:36 -07:00
" 1: "
2010-02-26 22:37:31 +01:00
: " =&d " ( addr ) , " =Q " ( psw ) : " Q " ( psw ) : " memory " , " cc " ) ;
2005-04-16 15:20:36 -07:00
}
2011-10-30 15:16:48 +01:00
2015-07-08 10:20:04 +02:00
/*
 * Extract current PSW mask (EPSW stores the two mask halves in
 * two 32-bit registers, combined here into one 64-bit value).
 */
static inline unsigned long __extract_psw(void)
{
	unsigned int reg1, reg2;

	asm volatile("epsw %0,%1" : "=d" (reg1), "=a" (reg2));
	return (((unsigned long) reg1) << 32) | ((unsigned long) reg2);
}
2015-10-12 11:54:03 +02:00
/* Enable machine check interruptions on this CPU via the PSW mask. */
static inline void local_mcck_enable(void)
{
	unsigned long mask = __extract_psw();

	__load_psw_mask(mask | PSW_MASK_MCHECK);
}
/* Disable machine check interruptions on this CPU via the PSW mask. */
static inline void local_mcck_disable(void)
{
	unsigned long mask = __extract_psw();

	__load_psw_mask(mask & ~PSW_MASK_MCHECK);
}
2011-10-30 15:16:48 +01:00
/*
 * Rewind PSW instruction address by specified number of bytes,
 * wrapping within the addressing-mode dependent address range
 * (64-bit EA, 31-bit BA, or 24-bit mode).
 */
static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
{
	unsigned long mask;

	mask = (psw.mask & PSW_MASK_EA) ? -1UL :
	       (psw.mask & PSW_MASK_BA) ? (1UL << 31) - 1 :
					  (1UL << 24) - 1;
	return (psw.addr - ilc) & mask;
}
2014-10-01 10:57:57 +02:00
/*
* Function to stop a processor until the next interrupt occurs
*/
void enabled_wait ( void ) ;
2005-04-16 15:20:36 -07:00
/*
* Function to drop a processor into disabled wait state
*/
2012-01-12 17:17:21 -08:00
static inline void __noreturn disabled_wait ( unsigned long code )
2005-04-16 15:20:36 -07:00
{
2015-10-12 12:28:28 +02:00
psw_t psw ;
psw . mask = PSW_MASK_BASE | PSW_MASK_WAIT | PSW_MASK_BA | PSW_MASK_EA ;
psw . addr = code ;
__load_psw ( psw ) ;
2008-12-25 13:39:16 +01:00
while ( 1 ) ;
2005-04-16 15:20:36 -07:00
}
2007-02-05 21:18:37 +01:00
/*
* Basic Machine Check / Program Check Handler .
*/
extern void s390_base_mcck_handler ( void ) ;
extern void s390_base_pgm_handler ( void ) ;
extern void s390_base_ext_handler ( void ) ;
extern void ( * s390_base_mcck_handler_fn ) ( void ) ;
extern void ( * s390_base_pgm_handler_fn ) ( void ) ;
extern void ( * s390_base_ext_handler_fn ) ( void ) ;
2006-09-25 23:31:33 -07:00
# define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
2012-06-05 09:59:52 +02:00
extern int memcpy_real ( void * , void * , size_t ) ;
extern void memcpy_absolute ( void * , void * , size_t ) ;
/*
 * Assign 'val' to 'dest' in absolute (prefix-free) storage; the
 * BUILD_BUG_ON catches size mismatches between dest and val.
 */
#define mem_assign_absolute(dest, val) {				\
	__typeof__(dest) __tmp = (val);					\
									\
	BUILD_BUG_ON(sizeof(__tmp) != sizeof(val));			\
	memcpy_absolute(&(dest), &__tmp, sizeof(__tmp));		\
}
2012-09-05 13:26:11 +02:00
# endif /* __ASSEMBLY__ */
# endif /* __ASM_S390_PROCESSOR_H */