/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */
# undef DEBUG
# include <linux/crash_dump.h>
# include <linux/bootmem.h>
# include <linux/io.h>
# include <linux/memblock.h>
# include <asm/code-patching.h>
# include <asm/kdump.h>
# include <asm/prom.h>
# include <asm/firmware.h>
# include <asm/uaccess.h>
# include <asm/rtas.h>
2005-12-04 18:39:37 +11:00
# ifdef DEBUG
# include <asm/udbg.h>
# define DBG(fmt...) udbg_printf(fmt)
# else
# define DBG(fmt...)
# endif
# ifndef CONFIG_NONSTATIC_KERNEL
/*
 * Reserve the low-memory region that the kdump trampolines occupy,
 * so nothing else is allocated there before setup_kdump_trampoline()
 * patches it.
 */
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}
/*
 * Install a two-instruction trampoline at @addr.
 *
 * The maximum range of a single instruction branch is the current
 * instruction's address + (32MB - 4) bytes, but for the trampoline we
 * need to branch to current address + 32MB.  So we place a nop in the
 * trampoline slot itself and put the branch in the next slot (+4
 * bytes), from where a (32MB - 4) branch reaches the target.  The net
 * effect is that jumping to "addr" lands at "addr" + 32MB.  Although
 * it takes two instructions, it does not clobber any registers.
 */
static void __init create_trampoline(unsigned long addr)
{
	unsigned int *slot = (unsigned int *)addr;

	patch_instruction(slot, PPC_INST_NOP);
	patch_branch(slot + 1, addr + PHYSICAL_START, 0);
}
/*
 * Fill the whole trampoline region with trampolines and, on pseries,
 * also patch the firmware NMI entry points so they bounce into the
 * relocated kernel.
 */
void __init setup_kdump_trampoline(void)
{
	unsigned long addr;

	DBG("-> setup_kdump_trampoline()\n");

	/* One trampoline per 8-byte slot across the trampoline region. */
	for (addr = KDUMP_TRAMPOLINE_START; addr < KDUMP_TRAMPOLINE_END; addr += 8)
		create_trampoline(addr);

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG("<- setup_kdump_trampoline()\n");
}
# endif /* CONFIG_NONSTATIC_KERNEL */
static size_t copy_oldmem_vaddr ( void * vaddr , char * buf , size_t csize ,
unsigned long offset , int userbuf )
{
if ( userbuf ) {
if ( copy_to_user ( ( char __user * ) buf , ( vaddr + offset ) , csize ) )
return - EFAULT ;
} else
memcpy ( buf , ( vaddr + offset ) , csize ) ;
return csize ;
}
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	phys_addr_t paddr;
	void *vaddr;

	if (!csize)
		return 0;

	/* Never copy more than one page at a time. */
	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = pfn << PAGE_SHIFT;

	if (memblock_is_region_memory(paddr, csize)) {
		/* Ordinary RAM: the kernel linear mapping covers it. */
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		/* Not in the linear map: create a temporary mapping. */
		vaddr = __ioremap(paddr, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
# ifdef CONFIG_PPC_RTAS
/*
* The crashkernel region will almost always overlap the RTAS region , so
* we have to be careful when shrinking the crashkernel region .
*/
void crash_free_reserved_phys_range ( unsigned long begin , unsigned long end )
{
unsigned long addr ;
2013-12-12 15:59:41 +11:00
const __be32 * basep , * sizep ;
2010-08-24 14:23:44 +00:00
unsigned int rtas_start = 0 , rtas_end = 0 ;
basep = of_get_property ( rtas . dev , " linux,rtas-base " , NULL ) ;
sizep = of_get_property ( rtas . dev , " rtas-size " , NULL ) ;
if ( basep & & sizep ) {
2013-12-12 15:59:41 +11:00
rtas_start = be32_to_cpup ( basep ) ;
rtas_end = rtas_start + be32_to_cpup ( sizep ) ;
2010-08-24 14:23:44 +00:00
}
for ( addr = begin ; addr < end ; addr + = PAGE_SIZE ) {
/* Does this page overlap with the RTAS region? */
if ( addr < = rtas_end & & ( ( addr + PAGE_SIZE ) > rtas_start ) )
continue ;
2013-04-29 15:06:47 -07:00
free_reserved_page ( pfn_to_page ( addr > > PAGE_SHIFT ) ) ;
2010-08-24 14:23:44 +00:00
}
}
# endif