/* thread_info.h: i386 low-level thread information
 *
 * Copyright (C) 2002  David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */
# ifndef _ASM_THREAD_INFO_H
# define _ASM_THREAD_INFO_H
# ifdef __KERNEL__
# include <linux/compiler.h>
# include <asm/page.h>
# ifndef __ASSEMBLY__
# include <asm/processor.h>
# endif
/*
 * low level task data that entry.S needs immediate access to
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 * - if the contents of this structure are changed,
 *   the assembly constants must also be changed
 */
# ifndef __ASSEMBLY__
struct thread_info {
struct task_struct * task ; /* main task structure */
struct exec_domain * exec_domain ; /* execution domain */
unsigned long flags ; /* low level flags */
unsigned long status ; /* thread-synchronous flags */
__u32 cpu ; /* current CPU */
2008-03-23 01:03:42 -07:00
int preempt_count ; /* 0 => preemptable,
< 0 = > BUG */
2005-04-16 15:20:36 -07:00
mm_segment_t addr_limit ; /* thread address space:
2008-03-23 01:03:42 -07:00
0 - 0xBFFFFFFF user - thread
0 - 0xFFFFFFFF kernel - thread
2005-04-16 15:20:36 -07:00
*/
[PATCH] vdso: randomize the i386 vDSO by moving it into a vma
Move the i386 VDSO down into a vma and thus randomize it.
Besides the security implications, this feature also helps debuggers, which
can COW a vma-backed VDSO just like a normal DSO and can thus do
single-stepping and other debugging features.
It's good for hypervisors (Xen, VMWare) too, which typically live in the same
high-mapped address space as the VDSO, hence whenever the VDSO is used, they
get lots of guest pagefaults and have to fix such guest accesses up - which
slows things down instead of speeding things up (the primary purpose of the
VDSO).
There's a new CONFIG_COMPAT_VDSO (default=y) option, which provides support
for older glibcs that still rely on a prelinked high-mapped VDSO. Newer
distributions (using glibc 2.3.3 or later) can turn this option off. Turning
it off is also recommended for security reasons: attackers cannot use the
predictable high-mapped VDSO page as syscall trampoline anymore.
There is a new vdso=[0|1] boot option as well, and a runtime
/proc/sys/vm/vdso_enabled sysctl switch, that allows the VDSO to be turned
on/off.
(This version of the VDSO-randomization patch also has working ELF
coredumping, the previous patch crashed in the coredumping code.)
This code is a combined work of the exec-shield VDSO randomization
code and Gerd Hoffmann's hypervisor-centric VDSO patch. Rusty Russell
started this patch and i completed it.
[akpm@osdl.org: cleanups]
[akpm@osdl.org: compile fix]
[akpm@osdl.org: compile fix 2]
[akpm@osdl.org: compile fix 3]
[akpm@osdl.org: revernt MAXMEM change]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Cc: Gerd Hoffmann <kraxel@suse.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-27 02:53:50 -07:00
void * sysenter_return ;
2005-04-16 15:20:36 -07:00
struct restart_block restart_block ;
2008-03-23 01:03:42 -07:00
unsigned long previous_esp ; /* ESP of the previous stack in
case of nested ( IRQ ) stacks
2005-04-16 15:20:36 -07:00
*/
__u8 supervisor_stack [ 0 ] ;
} ;
# else /* !__ASSEMBLY__ */
2005-09-09 19:28:28 +02:00
# include <asm/asm-offsets.h>
2005-04-16 15:20:36 -07:00
# endif
#define PREEMPT_ACTIVE		0x10000000

/*
 * THREAD_SIZE is the size of the combined kernel stack + thread_info
 * allocation; 4K when CONFIG_4KSTACKS is enabled, 8K otherwise.
 */
#ifdef CONFIG_4KSTACKS
#define THREAD_SIZE		(4096)
#else
#define THREAD_SIZE		(8192)
#endif

/* warn when less than this much stack space remains */
#define STACK_WARN		(THREAD_SIZE/8)
/*
 * macros/functions for gaining access to the thread information structure
 *
 * preempt_count needs to be 1 initially, until the scheduler is functional.
 */
#ifndef __ASSEMBLY__

#define INIT_THREAD_INFO(tsk)				\
{							\
	.task		= &tsk,				\
	.exec_domain	= &default_exec_domain,		\
	.flags		= 0,				\
	.cpu		= 0,				\
	.preempt_count	= 1,				\
	.addr_limit	= KERNEL_DS,			\
	.restart_block = {				\
		.fn = do_no_restart_syscall,		\
	},						\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
2006-06-27 02:53:47 -07:00
/* how to get the current stack pointer from C */
2008-01-24 22:16:20 +01:00
register unsigned long current_stack_pointer asm ( " esp " ) __used ;
2006-06-27 02:53:47 -07:00
2005-04-16 15:20:36 -07:00
/* how to get the thread information struct from C */
static inline struct thread_info * current_thread_info ( void )
{
2008-03-23 01:03:42 -07:00
return ( struct thread_info * )
( current_stack_pointer & ~ ( THREAD_SIZE - 1 ) ) ;
2005-04-16 15:20:36 -07:00
}
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
/* zeroed stack so unused depth can be measured later */
#define alloc_thread_info(tsk) ((struct thread_info *)			\
	__get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(THREAD_SIZE)))
#else
#define alloc_thread_info(tsk) ((struct thread_info *)			\
	__get_free_pages(GFP_KERNEL, get_order(THREAD_SIZE)))
#endif
#else /* !__ASSEMBLY__ */

/* how to get the thread information struct from ASM */
#define GET_THREAD_INFO(reg)	 \
	movl $-THREAD_SIZE, reg; \
	andl %esp, reg

/* use this one if reg already contains %esp */
#define GET_THREAD_INFO_WITH_ESP(reg) \
	andl $-THREAD_SIZE, reg

#endif
/*
 * thread information flags
 * - these are process state flags that various
 *   assembly files may need to access
 * - pending work-to-be-done flags are in LSW
 * - other flags in MSW
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SINGLESTEP		3	/* restore singlestep on return to
					   user mode */
#define TIF_IRET		4	/* return with iret */
#define TIF_SYSCALL_EMU		5	/* syscall emulation active */
#define TIF_SYSCALL_AUDIT	6	/* syscall auditing active */
#define TIF_SECCOMP		7	/* secure computing */
#define TIF_RESTORE_SIGMASK	8	/* restore signal mask in do_signal() */
#define TIF_HRTICK_RESCHED	9	/* reprogram hrtick timer */
#define TIF_MEMDIE		16
#define TIF_DEBUG		17	/* uses debug registers */
#define TIF_IO_BITMAP		18	/* uses I/O bitmap */
#define TIF_FREEZE		19	/* is freezing for suspend */
#define TIF_NOTSC		20	/* TSC is not accessible in userland */
#define TIF_FORCED_TF		21	/* true if TF in eflags artificially */
#define TIF_DEBUGCTLMSR		22	/* uses thread_struct.debugctlmsr */
#define TIF_DS_AREA_MSR		23	/* uses thread_struct.ds_area_msr */
#define TIF_BTS_TRACE_TS	24	/* record scheduling event timestamps */
#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_IRET		(1 << TIF_IRET)
#define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
#define _TIF_HRTICK_RESCHED	(1 << TIF_HRTICK_RESCHED)
#define _TIF_DEBUG		(1 << TIF_DEBUG)
#define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
#define _TIF_FREEZE		(1 << TIF_FREEZE)
#define _TIF_NOTSC		(1 << TIF_NOTSC)
#define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
#define _TIF_DEBUGCTLMSR	(1 << TIF_DEBUGCTLMSR)
#define _TIF_DS_AREA_MSR	(1 << TIF_DS_AREA_MSR)
#define _TIF_BTS_TRACE_TS	(1 << TIF_BTS_TRACE_TS)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK							\
	(0x0000FFFF & ~(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
			_TIF_SECCOMP | _TIF_SYSCALL_EMU))

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(0x0000FFFF & ~_TIF_SECCOMP)

/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW						\
	(_TIF_IO_BITMAP | _TIF_NOTSC | _TIF_DEBUGCTLMSR |	\
	 _TIF_DS_AREA_MSR | _TIF_BTS_TRACE_TS)
#define _TIF_WORK_CTXSW_PREV	_TIF_WORK_CTXSW
#define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW | _TIF_DEBUG)
/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_USEDFPU	0x0001	/* FPU was used by this task
				   this quantum (SMP) */
#define TS_POLLING	0x0002	/* True if in idle loop
				   and not sleeping */

#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
# endif /* __KERNEL__ */
# endif /* _ASM_THREAD_INFO_H */