/* 2008-10-23 09:26:29 +04:00 */
#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H

/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950 MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
/* Base virtual address of the kernel's direct (lowmem) mapping. */
#define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
/* 2008-04-29 05:52:40 +04:00 */
/* Kernel stack size: one page with CONFIG_4KSTACKS, two pages otherwise. */
# ifdef CONFIG_4KSTACKS
# define THREAD_ORDER 0
# else
# define THREAD_ORDER 1
# endif
/* THREAD_SIZE = PAGE_SIZE << THREAD_ORDER bytes per kernel stack. */
# define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
/* 2008-10-04 00:00:32 +04:00 */
/*
 * Exception stack indices. NOTE(review): on 32-bit only the double-fault
 * handler appears to get a dedicated stack (index 1); all the other
 * indices are 0 and there is a single exception stack — confirm against
 * the 64-bit counterparts where these differ.
 */
# define STACKFAULT_STACK 0
# define DOUBLEFAULT_STACK 1
# define NMI_STACK 0
# define DEBUG_STACK 0
# define MCE_STACK 0
# define N_EXCEPTION_STACKS 1
/* 2008-04-29 05:52:40 +04:00 */
/* 2008-01-30 15:32:44 +03:00 */
#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT	44
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	3

#ifndef __ASSEMBLY__
/* With PAE, page table entries are 64 bits wide. */
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;

/*
 * A PAE PTE: accessible either as the whole 64-bit value (pte) or as
 * its two 32-bit halves (pte_low/pte_high), which each fit in an
 * unsigned long.
 */
typedef union {
	struct {
		unsigned long pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;
#endif	/* __ASSEMBLY__ */
/* 2008-01-30 15:32:44 +03:00 */
#else  /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT	32
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	2

#ifndef __ASSEMBLY__
/* Without PAE, page table entries are plain 32-bit longs. */
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

/* A 32-bit PTE; pte and pte_low alias the same value. */
typedef union {
	pteval_t pte;
	pteval_t pte_low;
} pte_t;
#endif	/* __ASSEMBLY__ */
#endif	/* CONFIG_X86_PAE */
/* 2008-02-09 02:15:06 +03:00 */
# ifndef __ASSEMBLY__
/* Page-table pages are referenced via their struct page. */
typedef struct page * pgtable_t ;
# endif
/* 2008-01-30 15:32:44 +03:00 */
/* This arch supplies its own hugetlb get_unmapped_area. */
# ifdef CONFIG_HUGETLB_PAGE
# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
# endif
/* 2005-04-17 02:20:36 +04:00 */
#ifndef __ASSEMBLY__

/* virt -> phys: the direct mapping is a constant offset. */
#define __phys_addr_nodebug(x)	((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
/* Out-of-line version used when CONFIG_DEBUG_VIRTUAL is enabled. */
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#endif
/* Hide the relocation from the optimizer (offset 0 on 32-bit). */
#define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */

extern int nx_enabled;

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);
/* 2008-01-30 15:32:44 +03:00 */
/* CPUs with 3DNow!: pull in the MMX-optimized page clear/copy helpers. */
# ifdef CONFIG_X86_USE_3DNOW
# include <asm/mmx.h>
/*
 * [PATCH] vdso: randomize the i386 vDSO by moving it into a vma
 *
 * Move the i386 VDSO down into a vma and thus randomize it.
 * Besides the security implications, this feature also helps debuggers, which
 * can COW a vma-backed VDSO just like a normal DSO and can thus do
 * single-stepping and other debugging features.
 * It's good for hypervisors (Xen, VMWare) too, which typically live in the same
 * high-mapped address space as the VDSO, hence whenever the VDSO is used, they
 * get lots of guest pagefaults and have to fix such guest accesses up - which
 * slows things down instead of speeding things up (the primary purpose of the
 * VDSO).
 * There's a new CONFIG_COMPAT_VDSO (default=y) option, which provides support
 * for older glibcs that still rely on a prelinked high-mapped VDSO. Newer
 * distributions (using glibc 2.3.3 or later) can turn this option off. Turning
 * it off is also recommended for security reasons: attackers cannot use the
 * predictable high-mapped VDSO page as syscall trampoline anymore.
 * There is a new vdso=[0|1] boot option as well, and a runtime
 * /proc/sys/vm/vdso_enabled sysctl switch, that allows the VDSO to be turned
 * on/off.
 * (This version of the VDSO-randomization patch also has working ELF
 * coredumping, the previous patch crashed in the coredumping code.)
 * This code is a combined work of the exec-shield VDSO randomization
 * code and Gerd Hoffmann's hypervisor-centric VDSO patch. Rusty Russell
 * started this patch and i completed it.
 * [akpm@osdl.org: cleanups]
 * [akpm@osdl.org: compile fix]
 * [akpm@osdl.org: compile fix 2]
 * [akpm@osdl.org: compile fix 3]
 * [akpm@osdl.org: revert MAXMEM change]
 * Signed-off-by: Ingo Molnar <mingo@elte.hu>
 * Signed-off-by: Arjan van de Ven <arjan@infradead.org>
 * Cc: Gerd Hoffmann <kraxel@suse.de>
 * Cc: Rusty Russell <rusty@rustcorp.com.au>
 * Cc: Zachary Amsden <zach@vmware.com>
 * Cc: Andi Kleen <ak@muc.de>
 * Cc: Jan Beulich <jbeulich@novell.com>
 * Signed-off-by: Andrew Morton <akpm@osdl.org>
 * Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 * 2006-06-27 13:53:50 +04:00
 */
/* 2008-01-30 15:32:44 +03:00 */
/* Zero one page, delegating to the MMX-optimized helper. */
static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}
/* 2005-04-17 02:20:36 +04:00 */
/* 2008-01-30 15:32:44 +03:00 */
/* Copy one page, delegating to the MMX-optimized helper. */
static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
# else /* !CONFIG_X86_USE_3DNOW */
# include <linux/string.h>
/* 2005-04-17 02:20:36 +04:00 */
/* 2008-01-30 15:32:44 +03:00 */
/* Zero one page using the generic memset path. */
static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}
/* 2005-09-04 02:54:30 +04:00 */
/* 2008-01-30 15:32:44 +03:00 */
/* Copy one page of data from 'from' to 'to' using the generic memcpy path. */
static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
# endif /* CONFIG_X86_3DNOW */
# endif /* !__ASSEMBLY__ */
/* 2006-04-27 18:48:08 +04:00 */
/* 2008-10-23 09:26:29 +04:00 */
# endif /* _ASM_X86_PAGE_32_H */