#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048

#ifndef __ASSEMBLY__

#include <linux/string.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386
/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/* Memory to backup during crash kdump */
#define KEXEC_BACKUP_SRC_START	(0UL)
#define KEXEC_BACKUP_SRC_END	(640 * 1024UL)	/* 640K */
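
/*
 * Usage sketch (hedged, not part of the ABI): a crash image loader could
 * record the low-memory region these constants bound in the 64-bit
 * struct kimage_arch declared later in this header, e.g.
 *
 *	image->arch.backup_src_start = KEXEC_BACKUP_SRC_START;
 *	image->arch.backup_src_sz = KEXEC_BACKUP_SRC_END -
 *				    KEXEC_BACKUP_SRC_START;
 */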

/*
 * CPU does not save ss and sp on stack if execution is already
 * running in kernel mode at the time of NMI occurrence. This code
 * fixes it.
 */
static inline void crash_fixup_ss_esp(struct pt_regs *newregs,
				      struct pt_regs *oldregs)
{
#ifdef CONFIG_X86_32
	newregs->sp = (unsigned long)&(oldregs->sp);
	asm volatile("xorl %%eax, %%eax\n\t"
		     "movw %%ss, %%ax\n\t"
		     : "=a"(newregs->ss));
#endif
}
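
/*
 * The &(oldregs->sp) assignment above works because, for a same-privilege
 * trap on 32-bit, the CPU pushes no sp/ss: the interrupted stack pointer
 * is therefore the address of the truncated frame's sp slot itself.
 */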

/*
 * This function is responsible for capturing register states if coming
 * via panic otherwise just fix up the ss and sp if coming via kernel
 * mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
		crash_fixup_ss_esp(newregs, oldregs);
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = (unsigned long)current_text_addr();
	}
}
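
/*
 * Usage sketch (hypothetical caller, modeled on a kdump shutdown path;
 * crash_save_cpu() is declared in <linux/kexec.h>):
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);	// regs == NULL when via panic
 *	crash_save_cpu(&fixed_regs, smp_processor_id());
 */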

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context);
#endif
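
/*
 * Simplified sketch of the 64-bit call site (machine_kexec(); page_list
 * holds the PA_ and VA_ entries indexed by the constants above):
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context);
 */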

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/*
 * Number of elements and order of elements in this structure should match
 * with the ones in arch/x86/purgatory/entry64.S. If you make a change here
 * make an appropriate change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
	uint64_t rip;
};
#endif
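
/*
 * Sketch of filling this struct (hedged; the real setup lives in the
 * kexec_file_load() loaders, e.g. arch/x86/kernel/kexec-bzimage64.c;
 * the *_load_addr and stack values below are hypothetical):
 *
 *	struct kexec_entry64_regs regs64 = { 0 };
 *
 *	regs64.rbx = 0;				// bootstrap processor
 *	regs64.rsi = bootparams_load_addr;	// boot_params pointer
 *	regs64.rip = kernel_load_addr + 0x200;	// 64-bit entry point
 *	regs64.rsp = purgatory_stack_top;
 */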

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
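
/*
 * Registration sketch (hedged; modeled on how a hypervisor module such
 * as KVM's VMX code publishes its handler under RCU):
 *
 *	static void crash_vmclear_local_vmcss(void) { ... }
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   crash_vmclear_local_vmcss);
 */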

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */