2018-07-19 13:11:28 +02:00
// SPDX-License-Identifier: GPL-2.0
# include <linux/string.h>
2019-02-03 21:35:45 +01:00
# include <linux/elf.h>
2019-02-03 21:37:20 +01:00
# include <asm/sections.h>
2018-04-11 11:56:55 +02:00
# include <asm/setup.h>
2019-02-03 21:35:45 +01:00
# include <asm/kexec.h>
2018-07-25 15:01:11 +02:00
# include <asm/sclp.h>
2019-02-03 21:37:20 +01:00
# include <asm/diag.h>
2019-04-01 19:11:03 +02:00
# include <asm/uv.h>
2018-07-19 13:11:28 +02:00
# include "compressed/decompressor.h"
# include "boot.h"
/* Bounds of the .boot.data section, copied over to the decompressed kernel */
extern char __boot_data_start[], __boot_data_end[];
/* Bounds of .boot.preserved.data, copied over and preserved across boot */
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];

/* KASLR displacement of the decompressed kernel, 0 if KASLR is off */
unsigned long __bootdata_preserved(__kaslr_offset);
2018-04-10 14:14:02 +02:00
2019-02-03 21:37:20 +01:00
/*
* Some code and data needs to stay below 2 GB , even when the kernel would be
* relocated above 2 GB , because it has to use 31 bit addresses .
* Such code and data is part of the . dma section , and its location is passed
* over to the decompressed / relocated kernel via the . boot . preserved . data
* section .
*/
extern char _sdma [ ] , _edma [ ] ;
extern char _stext_dma [ ] , _etext_dma [ ] ;
extern struct exception_table_entry _start_dma_ex_table [ ] ;
extern struct exception_table_entry _stop_dma_ex_table [ ] ;
unsigned long __bootdata_preserved ( __sdma ) = __pa ( & _sdma ) ;
unsigned long __bootdata_preserved ( __edma ) = __pa ( & _edma ) ;
unsigned long __bootdata_preserved ( __stext_dma ) = __pa ( & _stext_dma ) ;
unsigned long __bootdata_preserved ( __etext_dma ) = __pa ( & _etext_dma ) ;
struct exception_table_entry *
__bootdata_preserved ( __start_dma_ex_table ) = _start_dma_ex_table ;
struct exception_table_entry *
__bootdata_preserved ( __stop_dma_ex_table ) = _stop_dma_ex_table ;
int _diag210_dma ( struct diag210 * addr ) ;
int _diag26c_dma ( void * req , void * resp , enum diag26c_sc subcode ) ;
int _diag14_dma ( unsigned long rx , unsigned long ry1 , unsigned long subcode ) ;
void _diag0c_dma ( struct hypfs_diag0c_entry * entry ) ;
void _diag308_reset_dma ( void ) ;
struct diag_ops __bootdata_preserved ( diag_dma_ops ) = {
. diag210 = _diag210_dma ,
. diag26c = _diag26c_dma ,
. diag14 = _diag14_dma ,
. diag0c = _diag0c_dma ,
. diag308_reset = _diag308_reset_dma
} ;
2019-08-12 14:50:34 -07:00
static struct diag210 _diag210_tmp_dma __section ( . dma . data ) ;
2019-02-03 21:37:20 +01:00
struct diag210 * __bootdata_preserved ( __diag210_tmp_dma ) = & _diag210_tmp_dma ;
void _swsusp_reset_dma ( void ) ;
unsigned long __bootdata_preserved ( __swsusp_reset_dma ) = __pa ( _swsusp_reset_dma ) ;
/*
 * Print an error message via the early SCLP console and stop the machine.
 * Does not return: ends in a disabled wait.
 */
void error(char *x)
{
	sclp_early_printk("\n\n");
	sclp_early_printk(x);
	sclp_early_printk("\n\n -- System halted");

	disabled_wait();
}
#ifdef CONFIG_KERNEL_UNCOMPRESSED
/*
 * Without a decompressor the first address safe to overwrite is the one
 * right behind the kernel image including its bss.
 */
unsigned long mem_safe_offset(void)
{
	return vmlinux.default_lma + vmlinux.image_size + vmlinux.bss_size;
}
#endif
2019-02-21 14:23:04 +01:00
static void rescue_initrd ( unsigned long addr )
2018-04-11 11:56:55 +02:00
{
if ( ! IS_ENABLED ( CONFIG_BLK_DEV_INITRD ) )
return ;
if ( ! INITRD_START | | ! INITRD_SIZE )
return ;
2019-02-21 14:23:04 +01:00
if ( addr < = INITRD_START )
2018-04-11 11:56:55 +02:00
return ;
2019-02-21 14:23:04 +01:00
memmove ( ( void * ) addr , ( void * ) INITRD_START , INITRD_SIZE ) ;
INITRD_START = addr ;
2018-04-11 11:56:55 +02:00
}
2018-04-10 14:14:02 +02:00
static void copy_bootdata ( void )
{
if ( __boot_data_end - __boot_data_start ! = vmlinux . bootdata_size )
error ( " .boot.data section size mismatch " ) ;
memcpy ( ( void * ) vmlinux . bootdata_off , __boot_data_start , vmlinux . bootdata_size ) ;
2019-04-01 19:10:45 +02:00
if ( __boot_data_preserved_end - __boot_data_preserved_start ! = vmlinux . bootdata_preserved_size )
error ( " .boot.preserved.data section size mismatch " ) ;
memcpy ( ( void * ) vmlinux . bootdata_preserved_off , __boot_data_preserved_start , vmlinux . bootdata_preserved_size ) ;
2018-04-10 14:14:02 +02:00
}
2019-02-03 21:35:45 +01:00
static void handle_relocs ( unsigned long offset )
{
Elf64_Rela * rela_start , * rela_end , * rela ;
int r_type , r_sym , rc ;
Elf64_Addr loc , val ;
Elf64_Sym * dynsym ;
rela_start = ( Elf64_Rela * ) vmlinux . rela_dyn_start ;
rela_end = ( Elf64_Rela * ) vmlinux . rela_dyn_end ;
dynsym = ( Elf64_Sym * ) vmlinux . dynsym_start ;
for ( rela = rela_start ; rela < rela_end ; rela + + ) {
loc = rela - > r_offset + offset ;
2019-10-21 19:56:00 +02:00
val = rela - > r_addend ;
2019-02-03 21:35:45 +01:00
r_sym = ELF64_R_SYM ( rela - > r_info ) ;
2019-10-21 19:56:00 +02:00
if ( r_sym ) {
if ( dynsym [ r_sym ] . st_shndx ! = SHN_UNDEF )
val + = dynsym [ r_sym ] . st_value + offset ;
} else {
/*
* 0 = = undefined symbol table index ( STN_UNDEF ) ,
* used for R_390_RELATIVE , only add KASLR offset
*/
val + = offset ;
}
2019-02-03 21:35:45 +01:00
r_type = ELF64_R_TYPE ( rela - > r_info ) ;
rc = arch_kexec_do_relocs ( r_type , ( void * ) loc , val , 0 ) ;
if ( rc )
error ( " Unknown relocation type " ) ;
}
}
2019-08-11 20:55:18 +02:00
/* Zero the decompressed kernel's .bss, which follows the image in memory */
static void clear_bss_section(void)
{
	memset((void *)vmlinux.default_lma + vmlinux.image_size, 0, vmlinux.bss_size);
}
2018-07-19 13:11:28 +02:00
void startup_kernel ( void )
{
2019-02-03 21:37:20 +01:00
unsigned long random_lma ;
2019-02-21 14:23:04 +01:00
unsigned long safe_addr ;
2018-07-19 16:51:25 +02:00
void * img ;
2018-07-19 13:11:28 +02:00
2019-02-21 14:23:04 +01:00
store_ipl_parmblock ( ) ;
safe_addr = mem_safe_offset ( ) ;
safe_addr = read_ipl_report ( safe_addr ) ;
2019-04-01 19:11:03 +02:00
uv_query_info ( ) ;
2019-02-21 14:23:04 +01:00
rescue_initrd ( safe_addr ) ;
2018-05-23 11:07:13 +02:00
sclp_early_read_info ( ) ;
2018-05-15 13:28:53 +02:00
setup_boot_command_line ( ) ;
2019-02-27 16:52:42 +01:00
parse_boot_command_line ( ) ;
2018-05-15 13:28:53 +02:00
setup_memory_end ( ) ;
2018-04-11 11:56:55 +02:00
detect_memory ( ) ;
2019-02-03 21:37:20 +01:00
random_lma = __kaslr_offset = 0 ;
if ( IS_ENABLED ( CONFIG_RANDOMIZE_BASE ) & & kaslr_enabled ) {
random_lma = get_random_base ( safe_addr ) ;
if ( random_lma ) {
__kaslr_offset = random_lma - vmlinux . default_lma ;
img = ( void * ) vmlinux . default_lma ;
vmlinux . default_lma + = __kaslr_offset ;
vmlinux . entry + = __kaslr_offset ;
vmlinux . bootdata_off + = __kaslr_offset ;
vmlinux . bootdata_preserved_off + = __kaslr_offset ;
vmlinux . rela_dyn_start + = __kaslr_offset ;
vmlinux . rela_dyn_end + = __kaslr_offset ;
vmlinux . dynsym_start + = __kaslr_offset ;
}
}
2018-07-19 13:11:28 +02:00
if ( ! IS_ENABLED ( CONFIG_KERNEL_UNCOMPRESSED ) ) {
2018-07-19 16:51:25 +02:00
img = decompress_kernel ( ) ;
memmove ( ( void * ) vmlinux . default_lma , img , vmlinux . image_size ) ;
2019-02-03 21:37:20 +01:00
} else if ( __kaslr_offset )
memcpy ( ( void * ) vmlinux . default_lma , img , vmlinux . image_size ) ;
2019-08-11 20:55:18 +02:00
clear_bss_section ( ) ;
2018-04-10 14:14:02 +02:00
copy_bootdata ( ) ;
2019-02-03 21:35:45 +01:00
if ( IS_ENABLED ( CONFIG_RELOCATABLE ) )
2019-02-03 21:37:20 +01:00
handle_relocs ( __kaslr_offset ) ;
if ( __kaslr_offset ) {
s390/kaslr: store KASLR offset for early dumps
The KASLR offset is added to vmcoreinfo in arch_crash_save_vmcoreinfo(),
so that it can be found by crash when processing kernel dumps.
However, arch_crash_save_vmcoreinfo() is called during a subsys_initcall,
so if the kernel crashes before that, we have no vmcoreinfo and no KASLR
offset.
Fix this by storing the KASLR offset in the lowcore, where the vmcore_info
pointer will be stored, and where it can be found by crash. In order to
make it distinguishable from a real vmcore_info pointer, mark it as uneven
(KASLR offset itself is aligned to THREAD_SIZE).
When arch_crash_save_vmcoreinfo() stores the real vmcore_info pointer in
the lowcore, it overwrites the KASLR offset. At that point, the KASLR
offset is not yet added to vmcoreinfo, so we also need to move the
mem_assign_absolute() behind the vmcoreinfo_append_str().
Fixes: b2d24b97b2a9 ("s390/kernel: add support for kernel address space layout randomization (KASLR)")
Cc: <stable@vger.kernel.org> # v5.2+
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
2019-11-19 12:30:53 +01:00
/*
* Save KASLR offset for early dumps , before vmcore_info is set .
* Mark as uneven to distinguish from real vmcore_info pointer .
*/
S390_lowcore . vmcore_info = __kaslr_offset | 0x1UL ;
2019-02-03 21:37:20 +01:00
/* Clear non-relocated kernel */
if ( IS_ENABLED ( CONFIG_KERNEL_UNCOMPRESSED ) )
memset ( img , 0 , vmlinux . image_size ) ;
}
2018-07-19 16:51:25 +02:00
vmlinux . entry ( ) ;
2018-07-19 13:11:28 +02:00
}