2005-12-04 10:39:37 +03:00
/*
* Routines for doing kexec - based kdump .
*
* Copyright ( C ) 2005 , IBM Corp .
*
* Created by : Michael Ellerman
*
* This source code is licensed under the GNU General Public License ,
* Version 2. See the file COPYING for more details .
*/
# undef DEBUG
2005-12-04 10:39:43 +03:00
# include <linux/crash_dump.h>
# include <linux/bootmem.h>
2010-07-12 08:36:09 +04:00
# include <linux/memblock.h>
2008-06-24 05:32:21 +04:00
# include <asm/code-patching.h>
2005-12-04 10:39:37 +03:00
# include <asm/kdump.h>
2008-02-14 03:56:49 +03:00
# include <asm/prom.h>
2005-12-04 10:39:37 +03:00
# include <asm/firmware.h>
2005-12-04 10:39:51 +03:00
# include <asm/uaccess.h>
2010-08-24 18:23:44 +04:00
# include <asm/rtas.h>
2005-12-04 10:39:37 +03:00
# ifdef DEBUG
# include <asm/udbg.h>
# define DBG(fmt...) udbg_printf(fmt)
# else
# define DBG(fmt...)
# endif
2011-12-15 02:57:15 +04:00
# ifndef CONFIG_NONSTATIC_KERNEL
2007-08-15 14:53:26 +04:00
/*
 * Reserve the low-memory window used for the kdump trampolines so the
 * early allocator never hands it out for anything else.
 */
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}
2005-12-04 10:39:37 +03:00
/*
 * Install a two-instruction trampoline at @addr that redirects
 * execution to @addr + PHYSICAL_START.
 *
 * A single branch instruction can only reach (its own address +
 * 32 MB - 4), but the trampoline must land a full 32 MB away.  So the
 * first instruction slot gets a nop and the branch goes in the second
 * slot (+4 bytes); that branch then only needs to cover 32 MB - 4.
 * The net effect of branching to @addr is a jump to @addr + 32 MB.
 * It costs two instructions but clobbers no registers.
 */
static void __init create_trampoline(unsigned long addr)
{
	unsigned int *p = (unsigned int *)addr;

	patch_instruction(p, PPC_INST_NOP);
	patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
2006-05-17 12:00:49 +04:00
/*
 * Patch a trampoline at every exception-vector slot in the kdump
 * trampoline window, plus (on pseries) the firmware-assisted NMI
 * entry points, so exceptions taken in the crashed kernel's address
 * range are redirected into the kdump kernel.
 */
void __init setup_kdump_trampoline(void)
{
	unsigned long addr;

	DBG(" -> setup_kdump_trampoline()\n");

	/* Vector entries are 8 bytes apart in the trampoline region. */
	for (addr = KDUMP_TRAMPOLINE_START; addr < KDUMP_TRAMPOLINE_END; addr += 8)
		create_trampoline(addr);

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}
2011-12-15 02:57:15 +04:00
# endif /* CONFIG_NONSTATIC_KERNEL */
2005-12-04 10:39:43 +03:00
/*
 * "savemaxmem=" early parameter: records the memory limit of the
 * crashed kernel as the highest usable page frame number.
 */
static int __init parse_savemaxmem(char *p)
{
	if (!p)
		return 1;

	saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;
	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);
2005-12-04 10:39:51 +03:00
2008-07-31 10:54:28 +04:00
static size_t copy_oldmem_vaddr ( void * vaddr , char * buf , size_t csize ,
unsigned long offset , int userbuf )
{
if ( userbuf ) {
if ( copy_to_user ( ( char __user * ) buf , ( vaddr + offset ) , csize ) )
return - EFAULT ;
} else
memcpy ( buf , ( vaddr + offset ) , csize ) ;
return csize ;
}
2006-08-02 05:13:50 +04:00
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (csize == 0)
		return 0;

	/* Never copy past the end of the page. */
	if (csize > PAGE_SIZE)
		csize = PAGE_SIZE;

	if (pfn <= min_low_pfn || pfn >= max_pfn) {
		/* Outside the linear mapping: create a temporary mapping. */
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	} else {
		/* Covered by the kernel's direct mapping. */
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	}

	return csize;
}
2010-08-24 18:23:44 +04:00
# ifdef CONFIG_PPC_RTAS
/*
* The crashkernel region will almost always overlap the RTAS region , so
* we have to be careful when shrinking the crashkernel region .
*/
void crash_free_reserved_phys_range ( unsigned long begin , unsigned long end )
{
unsigned long addr ;
const u32 * basep , * sizep ;
unsigned int rtas_start = 0 , rtas_end = 0 ;
basep = of_get_property ( rtas . dev , " linux,rtas-base " , NULL ) ;
sizep = of_get_property ( rtas . dev , " rtas-size " , NULL ) ;
if ( basep & & sizep ) {
rtas_start = * basep ;
rtas_end = * basep + * sizep ;
}
for ( addr = begin ; addr < end ; addr + = PAGE_SIZE ) {
/* Does this page overlap with the RTAS region? */
if ( addr < = rtas_end & & ( ( addr + PAGE_SIZE ) > rtas_start ) )
continue ;
ClearPageReserved ( pfn_to_page ( addr > > PAGE_SHIFT ) ) ;
init_page_count ( pfn_to_page ( addr > > PAGE_SHIFT ) ) ;
free_page ( ( unsigned long ) __va ( addr ) ) ;
totalram_pages + + ;
}
}
# endif