/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>

#include <asm/kdump.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
2007-08-15 14:53:26 +04:00
void __init reserve_kdump_trampoline ( void )
2006-05-17 12:00:49 +04:00
{
lmb_reserve ( 0 , KDUMP_RESERVE_LIMIT ) ;
}
2005-12-04 10:39:37 +03:00
static void __init create_trampoline ( unsigned long addr )
{
/* The maximum range of a single instruction branch, is the current
* instruction ' s address + ( 32 MB - 4 ) bytes . For the trampoline we
* need to branch to current address + 32 MB . So we insert a nop at
* the trampoline address , then the next instruction ( + 4 bytes )
* does a branch to ( 32 MB - 4 ) . The net effect is that when we
* branch to " addr " we jump to ( " addr " + 32 MB ) . Although it requires
* two instructions it doesn ' t require any registers .
*/
create_instruction ( addr , 0x60000000 ) ; /* nop */
create_branch ( addr + 4 , addr + PHYSICAL_START , 0 ) ;
}
2006-05-17 12:00:49 +04:00
void __init setup_kdump_trampoline ( void )
2005-12-04 10:39:37 +03:00
{
unsigned long i ;
2006-05-17 12:00:49 +04:00
DBG ( " -> setup_kdump_trampoline() \n " ) ;
2005-12-04 10:39:37 +03:00
for ( i = KDUMP_TRAMPOLINE_START ; i < KDUMP_TRAMPOLINE_END ; i + = 8 ) {
create_trampoline ( i ) ;
}
2007-09-18 11:25:12 +04:00
# ifdef CONFIG_PPC_PSERIES
2005-12-04 10:39:37 +03:00
create_trampoline ( __pa ( system_reset_fwnmi ) - PHYSICAL_START ) ;
create_trampoline ( __pa ( machine_check_fwnmi ) - PHYSICAL_START ) ;
2007-09-18 11:25:12 +04:00
# endif /* CONFIG_PPC_PSERIES */
2005-12-04 10:39:37 +03:00
2006-05-17 12:00:49 +04:00
DBG ( " <- setup_kdump_trampoline() \n " ) ;
2005-12-04 10:39:37 +03:00
}
#ifdef CONFIG_PROC_VMCORE
/*
 * "elfcorehdr=" tells the kdump kernel where the crashed kernel left
 * the ELF core header; record it for /proc/vmcore.
 */
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif
2005-12-04 10:39:43 +03:00
static int __init parse_savemaxmem ( char * p )
{
if ( p )
saved_max_pfn = ( memparse ( p , & p ) > > PAGE_SHIFT ) - 1 ;
2006-03-31 14:30:33 +04:00
return 1 ;
2005-12-04 10:39:43 +03:00
}
__setup ( " savemaxmem= " , parse_savemaxmem ) ;
2005-12-04 10:39:51 +03:00
2006-08-02 05:13:50 +04:00
/**
2005-12-04 10:39:51 +03:00
* copy_oldmem_page - copy one page from " oldmem "
* @ pfn : page frame number to be copied
* @ buf : target memory address for the copy ; this can be in kernel address
* space or user address space ( see @ userbuf )
* @ csize : number of bytes to copy
* @ offset : offset in bytes into the page ( based on pfn ) to begin the copy
* @ userbuf : if set , @ buf is in user address space , use copy_to_user ( ) ,
* otherwise @ buf is in kernel address space , use memcpy ( ) .
*
* Copy a page from " oldmem " . For this page , there is no pte mapped
* in the current kernel . We stitch up a pte , similar to kmap_atomic .
*/
ssize_t copy_oldmem_page ( unsigned long pfn , char * buf ,
size_t csize , unsigned long offset , int userbuf )
{
void * vaddr ;
if ( ! csize )
return 0 ;
vaddr = __ioremap ( pfn < < PAGE_SHIFT , PAGE_SIZE , 0 ) ;
if ( userbuf ) {
if ( copy_to_user ( ( char __user * ) buf , ( vaddr + offset ) , csize ) ) {
iounmap ( vaddr ) ;
return - EFAULT ;
}
} else
memcpy ( buf , ( vaddr + offset ) , csize ) ;
iounmap ( vaddr ) ;
return csize ;
}