// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 */
#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/code-patching.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
#include <asm/inst.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
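
/*
 * Explanatory note (not in the original source): without
 * CONFIG_NONSTATIC_KERNEL the kernel runs at a fixed, link-time physical
 * address, so a kdump kernel is built with a non-zero PHYSICAL_START.
 * The low memory holding the crashed kernel's exception vectors is
 * reserved here (KDUMP_RESERVE_LIMIT bytes from 0) and later filled with
 * trampolines that redirect exceptions into the kdump kernel.
 */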
#ifndef CONFIG_NONSTATIC_KERNEL
void __init reserve_kdump_trampoline(void)
{
	memblock_reserve(0, KDUMP_RESERVE_LIMIT);
}

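/*
 * Create one trampoline slot at @addr: a nop followed by a relative
 * branch into the kdump kernel's copy of the same address,
 * PHYSICAL_START bytes higher.
 */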
static void __init create_trampoline(unsigned long addr)
{
	struct ppc_inst *p = (struct ppc_inst *)addr;

	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+4 bytes)
	 * does a branch to (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	patch_instruction(p, ppc_inst(PPC_INST_NOP));
	patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
}
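
/*
 * Illustrative layout of one trampoline slot (a sketch, assuming the
 * conventional kdump PHYSICAL_START of 32 MB):
 *
 *	addr + 0:	nop			<- exception vector entry
 *	addr + 4:	b  addr + 32 MB		<- into the kdump kernel
 *
 * The branch sits at addr + 4, so its relative displacement is
 * 32 MB - 4, the maximum reach of a single "b" instruction.
 */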
void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG("-> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG("<- setup_kdump_trampoline()\n");
}
#endif /* CONFIG_NONSTATIC_KERNEL */

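/*
 * Copy @csize bytes from @vaddr + @offset into @buf. If @userbuf is set,
 * @buf is a user-space pointer and copy_to_user() is used; otherwise a
 * plain memcpy() is done. Returns the number of bytes copied, or -EFAULT
 * if the user copy faults.
 */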
static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
				unsigned long offset, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
			return -EFAULT;
	} else
		memcpy(buf, (vaddr + offset), csize);

	return csize;
}

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;
	phys_addr_t paddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);
	paddr = pfn << PAGE_SHIFT;

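	/*
	 * If the page lies in RAM known to this kernel, it is covered by
	 * the linear mapping and can be reached directly via __va().
	 * Otherwise set up a temporary cacheable mapping with
	 * ioremap_cache() and tear it down once the copy is done.
	 */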
	if (memblock_is_region_memory(paddr, csize)) {
		vaddr = __va(paddr);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = ioremap_cache(paddr, PAGE_SIZE);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
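
/*
 * Note: copy_oldmem_page() is the arch hook behind /proc/vmcore;
 * read_from_oldmem() in fs/proc/vmcore.c calls it a page at a time while
 * userspace tools such as makedumpfile read out the crashed kernel's
 * memory image.
 */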

#ifdef CONFIG_PPC_RTAS
/*
 * The crashkernel region will almost always overlap the RTAS region, so
 * we have to be careful when shrinking the crashkernel region.
 */
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
	unsigned long addr;
	const __be32 *basep, *sizep;
	unsigned int rtas_start = 0, rtas_end = 0;

	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
	sizep = of_get_property(rtas.dev, "rtas-size", NULL);

	if (basep && sizep) {
		rtas_start = be32_to_cpup(basep);
		rtas_end = rtas_start + be32_to_cpup(sizep);
	}

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/* Does this page overlap with the RTAS region? */
		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
			continue;
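
		/*
		 * The check above is a standard half-open interval overlap
		 * test: [addr, addr + PAGE_SIZE) intersects
		 * [rtas_start, rtas_end) when addr < rtas_end and
		 * addr + PAGE_SIZE > rtas_start; using "<=" is slightly
		 * conservative and also skips freeing the page that starts
		 * exactly at rtas_end.
		 */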
		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
	}
}
#endif