// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
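
/*
 * Image formats supported via kexec_file_load() on arm64; the list is
 * NULL-terminated for the generic probe loop.
 */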
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};
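
/*
 * Free the arch-specific buffers set up while loading the image (the copied
 * DTB and, for crash kernels, the ELF core headers), then run the generic
 * cleanup.
 */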
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}
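
/*
 * Build ELF64 core headers covering all of System RAM, minus the crashkernel
 * reservation, so that the crash (capture) kernel can expose the old kernel's
 * memory through /proc/vmcore.
 */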
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 1; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (!ret)
		ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to find
 * valid locations, this function will undo changes to the image and return
 * non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* don't allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}
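
	/*
	 * The arm64 boot protocol requires an initrd to reside within a 1 GB
	 * aligned physical window of up to 32 GB in size that also covers the
	 * kernel image, which is what the buf_max limit below enforces.
	 */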
	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
						+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}
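
	/*
	 * Build the device tree for the next kernel:
	 * of_kexec_alloc_and_setup_fdt() duplicates the running system's FDT
	 * and rewrites /chosen (command line, initrd range, and the
	 * crash-kernel properties where applicable).
	 */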
	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);

	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}