// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Chen Liqin <liqin.chen@sunplusct.com>
 *  Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */
# include <linux/init.h>
# include <linux/mm.h>
# include <linux/memblock.h>
# include <linux/sched.h>
# include <linux/console.h>
# include <linux/screen_info.h>
# include <linux/of_fdt.h>
# include <linux/of_platform.h>
# include <linux/sched/task.h>
2020-03-17 18:11:44 -07:00
# include <linux/smp.h>
2020-09-17 15:37:15 -07:00
# include <linux/efi.h>
2021-04-19 03:55:38 +03:00
# include <linux/crash_dump.h>
2017-07-10 18:00:26 -07:00
2020-03-17 18:11:44 -07:00
# include <asm/cpu_ops.h>
2020-09-17 15:37:11 -07:00
# include <asm/early_ioremap.h>
2021-04-13 02:35:14 -04:00
# include <asm/pgtable.h>
2017-07-10 18:00:26 -07:00
# include <asm/setup.h>
2020-11-04 16:04:38 -08:00
# include <asm/set_memory.h>
2017-07-10 18:00:26 -07:00
# include <asm/sections.h>
2020-03-17 18:11:35 -07:00
# include <asm/sbi.h>
2017-07-10 18:00:26 -07:00
# include <asm/tlbflush.h>
# include <asm/thread_info.h>
2020-01-06 10:38:32 -08:00
# include <asm/kasan.h>
2020-09-17 15:37:15 -07:00
# include <asm/efi.h>
2017-07-10 18:00:26 -07:00
2019-10-17 15:00:17 -07:00
# include "head.h"
#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
/*
 * Legacy VGA-style console parameters, consumed by the dummy console
 * and/or EFI code.  Explicitly placed in .data via __section().
 */
struct screen_info screen_info __section(".data") = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif
2020-02-04 19:19:47 +08:00
/*
* The lucky hart to first increment this variable will boot the other cores .
* This is used before the kernel initializes the BSS so it can ' t be in the
* BSS .
*/
2021-04-13 02:35:14 -04:00
atomic_t hart_lottery __section ( " .sdata " )
# ifdef CONFIG_XIP_KERNEL
= ATOMIC_INIT ( 0xC001BEEF )
# endif
;
2018-10-02 12:15:05 -07:00
unsigned long boot_cpu_hartid ;
2020-03-17 18:11:44 -07:00
static DEFINE_PER_CPU ( struct cpu , cpu_devices ) ;
2017-07-10 18:00:26 -07:00
2020-10-12 17:24:10 +03:00
/*
* Place kernel memory regions on the resource tree so that
* kexec - tools can retrieve them from / proc / iomem . While there
* also add " System RAM " regions for compatibility with other
* archs , and the rest of the known regions for completeness .
*/
2021-04-19 03:55:37 +03:00
static struct resource kimage_res = { . name = " Kernel image " , } ;
2020-10-12 17:24:10 +03:00
static struct resource code_res = { . name = " Kernel code " , } ;
static struct resource data_res = { . name = " Kernel data " , } ;
static struct resource rodata_res = { . name = " Kernel rodata " , } ;
static struct resource bss_res = { . name = " Kernel bss " , } ;
2021-04-19 03:55:39 +03:00
# ifdef CONFIG_CRASH_DUMP
static struct resource elfcorehdr_res = { . name = " ELF Core hdr " , } ;
# endif
2020-10-12 17:24:10 +03:00
/*
 * Insert @res under @parent in the resource tree.
 *
 * Returns 1 on success so callers can uniformly test for "< 0";
 * on failure, logs the conflict and returns the negative error code
 * from insert_resource().
 */
static int __init add_resource(struct resource *parent,
			       struct resource *res)
{
	int ret;

	ret = insert_resource(parent, res);
	if (ret < 0) {
		pr_err("Failed to add a %s resource at %llx\n",
		       res->name, (unsigned long long)res->start);
		return ret;
	}

	return 1;
}
/*
 * Register the kernel image on the resource tree.
 *
 * The memory region of the kernel image is continuous and was reserved
 * on setup_bootmem; register it here as a resource, with the various
 * segments of the image (code, rodata, data, bss) as child nodes.
 *
 * Returns a positive value on success or a negative error code.
 */
static int __init add_kernel_resources(void)
{
	struct resource *const segments[] = {
		&code_res, &rodata_res, &data_res, &bss_res
	};
	unsigned int i;
	int ret;

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;
	code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	rodata_res.start = __pa_symbol(__start_rodata);
	rodata_res.end = __pa_symbol(__end_rodata) - 1;
	rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	data_res.start = __pa_symbol(_data);
	data_res.end = __pa_symbol(_edata) - 1;
	data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	bss_res.start = __pa_symbol(__bss_start);
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	/* The parent resource spans from the start of text to the end of bss. */
	kimage_res.start = code_res.start;
	kimage_res.end = bss_res.end;
	kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	ret = add_resource(&iomem_resource, &kimage_res);
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(segments); i++) {
		ret = add_resource(&kimage_res, segments[i]);
		if (ret < 0)
			return ret;
	}

	return ret;
}
/*
 * Populate the resource tree (/proc/iomem) from the memblock memory map:
 * kernel image, optional kexec/crash regions, reserved regions and
 * "System RAM".  struct resource entries are carved out of a single
 * memblock allocation, handed out from the top of the array downwards.
 */
static void __init init_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	struct resource *pool;
	size_t pool_sz;
	int nr_res, idx, ret;

	/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
	nr_res = memblock.memory.cnt + memblock.reserved.cnt + 1;
	idx = nr_res - 1;

	pool_sz = nr_res * sizeof(*pool);
	pool = memblock_alloc(pool_sz, SMP_CACHE_BYTES);
	if (!pool)
		panic("%s: Failed to allocate %zu bytes\n", __func__, pool_sz);

	/*
	 * Start by adding the reserved regions; if they overlap
	 * with /memory regions, insert_resource later on will take
	 * care of it.
	 */
	ret = add_kernel_resources();
	if (ret < 0)
		goto error;

#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.start != crashk_res.end) {
		ret = add_resource(&iomem_resource, &crashk_res);
		if (ret < 0)
			goto error;
	}
#endif

#ifdef CONFIG_CRASH_DUMP
	if (elfcorehdr_size > 0) {
		elfcorehdr_res.start = elfcorehdr_addr;
		elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
		elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		/*
		 * NOTE(review): return value intentionally(?) ignored — the
		 * core header resource looks best-effort; confirm before
		 * adding a goto error here.
		 */
		add_resource(&iomem_resource, &elfcorehdr_res);
	}
#endif

	for_each_reserved_mem_region(region) {
		res = &pool[idx--];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;

		/*
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start)) {
			/* Re-use this pre-allocated resource */
			idx++;
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = &pool[idx--];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}

		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)
			goto error;
	}

	/*
	 * Clean-up any unused pre-allocated resources; the unused entries
	 * are pool[0..idx] since the array was consumed top-down.
	 */
	if (idx >= 0)
		memblock_free(pool, (idx + 1) * sizeof(*pool));
	return;

error:
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free(pool, pool_sz);
}
/*
 * Early scan of the device tree from init memory; logs the machine
 * model on success.  Without a DTB, optionally forces the built-in
 * command line (CONFIG_CMDLINE_FORCE).
 */
static void __init parse_dtb(void)
{
	const char *name;

	if (!early_init_dt_scan(dtb_early_va)) {
		pr_err("No DTB passed to the kernel\n");
#ifdef CONFIG_CMDLINE_FORCE
		strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
		pr_info("Forcing kernel command line to: %s\n", boot_command_line);
#endif
		return;
	}

	name = of_flat_dt_get_machine_name();
	if (name) {
		pr_info("Machine model: %s\n", name);
		dump_stack_set_arch_desc("%s (DT)", name);
	}
}
/*
 * Architecture entry point of boot-time setup.  The call order below is
 * load-bearing (e.g. paging_init() before unflattening the device tree,
 * init_resources() before sbi_init()); do not reorder casually.
 */
void __init setup_arch(char **cmdline_p)
{
	parse_dtb();
	setup_initial_init_mm(_stext, _etext, _edata, _end);

	*cmdline_p = boot_command_line;

	early_ioremap_setup();
	jump_label_init();
	parse_early_param();

	efi_init();
	paging_init();

	/* With a builtin DTB the tree was already copied; otherwise map it. */
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
	unflatten_and_copy_device_tree();
#else
	if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
		unflatten_device_tree();
	else
		pr_err("No DTB found in kernel mappings\n");
#endif

	misc_mem_init();

	init_resources();
	sbi_init();

#ifdef CONFIG_KASAN
	kasan_init();
#endif

#ifdef CONFIG_SMP
	setup_smp();
#endif

	riscv_fill_hwcap();
}
/*
 * Register NUMA nodes and per-CPU devices with the driver core so they
 * show up in sysfs.  Failures to register a CPU are logged but do not
 * abort the initcall.
 */
static int __init topology_init(void)
{
	int id, ret;

	for_each_online_node(id)
		register_one_node(id);

	for_each_possible_cpu(id) {
		struct cpu *cpu = &per_cpu(cpu_devices, id);

		cpu->hotpluggable = cpu_has_hotplug(id);
		ret = register_cpu(cpu, id);
		if (unlikely(ret))
			pr_warn("Warning: %s: register_cpu %d failed (%d)\n",
				__func__, id, ret);
	}

	return 0;
}
subsys_initcall(topology_init);
void free_initmem ( void )
{
2021-01-29 11:00:36 -08:00
if ( IS_ENABLED ( CONFIG_STRICT_KERNEL_RWX ) )
2021-06-24 14:00:41 +02:00
set_kernel_memory ( lm_alias ( __init_begin ) , lm_alias ( __init_end ) ,
IS_ENABLED ( CONFIG_64BIT ) ?
set_memory_rw : set_memory_rw_nx ) ;
2021-01-29 11:00:36 -08:00
2020-11-04 16:04:38 -08:00
free_initmem_default ( POISON_FREE_INITMEM ) ;
}