/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
# include <linux/init.h>
# include <linux/ioport.h>
2011-07-23 16:30:40 -04:00
# include <linux/export.h>
2006-07-10 04:44:13 -07:00
# include <linux/screen_info.h>
2011-12-08 10:22:09 -08:00
# include <linux/memblock.h>
2005-04-16 15:20:36 -07:00
# include <linux/bootmem.h>
# include <linux/initrd.h>
# include <linux/root_dev.h>
# include <linux/highmem.h>
# include <linux/console.h>
2006-03-27 01:16:04 -08:00
# include <linux/pfn.h>
2007-06-30 00:55:48 +09:00
# include <linux/debugfs.h>
2012-10-11 18:14:58 +02:00
# include <linux/kexec.h>
2013-04-13 13:15:47 +02:00
# include <linux/sizes.h>
2014-07-16 16:51:32 +01:00
# include <linux/device.h>
# include <linux/dma-contiguous.h>
2016-05-11 00:50:03 +02:00
# include <linux/decompress/generic.h>
2016-11-23 14:43:46 +01:00
# include <linux/of_fdt.h>
2005-04-16 15:20:36 -07:00
# include <asm/addrspace.h>
# include <asm/bootinfo.h>
2007-10-23 12:43:11 +01:00
# include <asm/bugs.h>
2005-07-13 11:48:45 +00:00
# include <asm/cache.h>
2015-01-29 11:14:13 +00:00
# include <asm/cdmm.h>
2005-04-16 15:20:36 -07:00
# include <asm/cpu.h>
2015-09-22 10:10:55 -07:00
# include <asm/debug.h>
2018-06-15 13:08:45 +02:00
# include <asm/dma-coherence.h>
2005-04-16 15:20:36 -07:00
# include <asm/sections.h>
# include <asm/setup.h>
2007-11-19 12:23:51 +00:00
# include <asm/smp-ops.h>
2010-10-13 00:52:46 -06:00
# include <asm/prom.h>
2005-04-16 15:20:36 -07:00
2015-09-11 17:46:14 +03:00
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
/* 1 MiB of reserved space for a device tree blob appended to the image. */
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

/* Per-CPU feature/identification data, filled in during CPU probing. */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

/* Boot-time physical memory map, populated via add_memory_region(). */
struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the begin of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

/*
 * Probe pattern for detect_memory_region(): the variable's own contents
 * (the address of detect_memory_region) are compared against aliased
 * copies of themselves to find where RAM wraps around.
 */
static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
/* PFN of the first byte of RAM; computed at boot in bootmem_init(). */
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif
2014-11-22 00:22:09 +01:00
/*
 * Record a physical memory region of the given type in boot_mem_map,
 * merging it with any same-typed entry it overlaps or abuts.  Invalid
 * (wrapping) regions are dropped with a warning; a full map is reported
 * and the region discarded.
 */
void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;	/* slot used if no merge happens */
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == PHYS_ADDR_MAX)
		--size;

	/* Sanity check: reject regions whose end wraps past the start. */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		/* Overlapping/adjacent same-type entry: extend it in place. */
		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}
2014-11-22 00:22:09 +01:00
/*
 * Size the RAM starting at @start by alias detection: the detect_magic
 * word is compared against the copy of itself that appears at each
 * power-of-two offset between @sz_min and @sz_max.  The first offset at
 * which the contents match is taken to be the point where the address
 * space wraps, i.e. the memory size.  The result is registered as a
 * BOOT_MEM_RAM region.
 */
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	/* If no alias is found, size ends up at (or just above) sz_max. */
	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long)size) / SZ_1M,
		(unsigned long long)start,
		((unsigned long long)sz_min) / SZ_1M,
		((unsigned long long)sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}
2018-01-02 19:52:21 +01:00
/*
 * Check whether [start, start + size) is usable: it must lie entirely
 * inside some BOOT_MEM_RAM entry of boot_mem_map and must not touch any
 * BOOT_MEM_RESERVED entry.
 */
static bool __init __maybe_unused memory_region_available(phys_addr_t start,
							  phys_addr_t size)
{
	bool covered_by_ram = false;
	bool clear_of_reserved = true;
	int idx;

	for (idx = 0; idx < boot_mem_map.nr_map; idx++) {
		phys_addr_t reg_start = boot_mem_map.map[idx].addr;
		phys_addr_t reg_end = reg_start + boot_mem_map.map[idx].size;

		switch (boot_mem_map.map[idx].type) {
		case BOOT_MEM_RAM:
			/* Region must be fully contained in RAM. */
			if (start >= reg_start && start + size <= reg_end)
				covered_by_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			/* Any overlap with a reserved entry disqualifies. */
			if ((start >= reg_start && start < reg_end) ||
			    (start < reg_start && start + size >= reg_start))
				clear_of_reserved = false;
			break;
		default:
			continue;
		}
	}

	return covered_by_ram && clear_of_reserved;
}
2005-04-16 15:20:36 -07:00
/*
 * Dump boot_mem_map to the kernel log, one line per region, with a
 * human-readable tag for each region type.
 */
static void __init print_memory_map(void)
{
	int i;
	/* Width of a printed physical address: two hex digits per byte. */
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		/* No trailing newline: the type tag continues the line. */
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}
2006-08-11 17:51:49 +02:00
/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

/* "rd_start=" early param: start address of the initrd image. */
static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	/* initrd_end may already hold the size from "rd_size=": rebase it. */
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);
static int __init rd_size_early ( char * p )
{
initrd_end + = memparse ( p , & p ) ;
2005-04-16 15:20:36 -07:00
return 0 ;
}
2006-08-11 17:51:53 +02:00
early_param ( " rd_size " , rd_size_early ) ;
2005-04-16 15:20:36 -07:00
/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	/* Invalid or missing initrd: clear the markers so it is ignored. */
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}
2016-05-11 00:50:03 +02:00
/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped.  Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		/* Swap the whole image in 64-bit chunks, end rounded up to 8. */
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}
2006-08-11 17:51:49 +02:00
/*
 * Validate the initrd against the final memory layout and reserve its
 * pages in the bootmem allocator.  On failure the initrd is disabled;
 * the error messages deliberately omit the newline so the KERN_CONT
 * " - disabling initrd" continuation completes the line.
 */
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#else  /* !CONFIG_BLK_DEV_INITRD */

/* No initrd support: contributes no reserved PFNs. */
static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif
2006-08-11 17:51:48 +02:00
/*
 * Initialize the bootmem allocator. It also setup initrd related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

/*
 * NUMA configurations (IP27, Loongson-3 NUMA) only handle initrd
 * bookkeeping here; per-node memory setup is presumably done by
 * platform code — not visible in this file.
 */
static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */
2016-11-23 14:43:45 +01:00
/*
 * Bytes needed for a bootmem bitmap covering @pages page frames:
 * one bit per page, rounded up to a whole number of longs.
 */
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	return ALIGN(DIV_ROUND_UP(pages, 8), sizeof(long));
}
2006-08-11 17:51:49 +02:00
/*
 * Walk boot_mem_map to establish min_low_pfn/max_low_pfn/max_pfn, find a
 * safe location for the bootmem bitmap, initialize the boot-time
 * allocator, register RAM with memblock, free usable low pages, and
 * reserve the bitmap and (if present) the initrd.
 */
static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;	/* candidate PFN for the bitmap */
	unsigned long bootmap_size;
	phys_addr_t ramstart = PHYS_ADDR_MAX;
	bool bootmap_valid = false;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available
	 * and the lowest used RAM address
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		ramstart = min(ramstart, boot_mem_map.map[i].addr);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		/* Regions entirely below the kernel can't hold the bitmap. */
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		/* Lowest usable start, but never below the kernel image. */
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
				  BOOT_MEM_RESERVED);

	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (ARCH_PFN_OFFSET - min_low_pfn > 0UL) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;
#endif

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * check that mapstart doesn't overlap with any of
	 * memory regions that have been reserved through eg. DTB
	 */
	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
						bootmap_size);
	/* If not, try placing the bitmap just past each reserved region. */
	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
		unsigned long mapstart_addr;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RESERVED:
			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
						  boot_mem_map.map[i].size);
			if (PHYS_PFN(mapstart_addr) < mapstart)
				break;

			bootmap_valid = memory_region_available(mapstart_addr,
								bootmap_size);
			if (bootmap_valid)
				mapstart = PHYS_PFN(mapstart_addr);
			break;
		default:
			break;
		}
	}

	if (!bootmap_valid)
		panic("No memory area to place a bootmap bitmap");

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
					      min_low_pfn, max_low_pfn))
		panic("Unexpected memory size required for bootmap");

	/* Register every region (clamped to lowmem) with memblock. */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				reserve_bootmem(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size,
						BOOTMEM_DEFAULT);
			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;

		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel
		 * But is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}
2006-08-11 17:51:49 +02:00
#endif /* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator.  To keep old code from
 *	 breaking plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */

/* Set once the user overrides the firmware map with "mem="/"memmap=". */
static int usermem __initdata;
2006-08-11 17:51:53 +02:00
/*
 * "mem=size[@start]" early param: override the firmware-provided memory
 * map.  The first use wipes boot_mem_map; each use then adds one RAM
 * region.  When no "@start" is given, the region starts at 0.
 */
static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * Guard against a bare "mem" with no argument: memparse() must
	 * not be handed a NULL pointer (matches early_parse_memmap).
	 */
	if (!p)
		return -EINVAL;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	return 0;
}
early_param("mem", early_parse_mem);
2006-06-18 01:32:22 +01:00
/*
 * "memmap=" early param, x86-style subset: "nn@ss" adds RAM, "nn$ss"
 * adds a reserved region; "exactmap" and "nn#ss" (ACPI data) are
 * rejected on MIPS.  A successful parse marks the map as user-defined.
 */
static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p + 1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p + 1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
2013-02-12 19:41:48 +00:00
# ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr , setup_elfcorehdr_size ;
static int __init early_parse_elfcorehdr ( char * p )
{
int i ;
setup_elfcorehdr = memparse ( p , & p ) ;
for ( i = 0 ; i < boot_mem_map . nr_map ; i + + ) {
unsigned long start = boot_mem_map . map [ i ] . addr ;
unsigned long end = ( boot_mem_map . map [ i ] . addr +
boot_mem_map . map [ i ] . size ) ;
if ( setup_elfcorehdr > = start & & setup_elfcorehdr < end ) {
/*
* Reserve from the elf core header to the end of
* the memory segment , that should all be kdump
* reserved memory .
*/
setup_elfcorehdr_size = end - setup_elfcorehdr ;
break ;
}
}
/*
* If we don ' t find it in the memory map , then we shouldn ' t
* have to worry about it , as the new kernel won ' t use it .
*/
return 0 ;
}
early_param ( " elfcorehdr " , early_parse_elfcorehdr ) ;
# endif
2014-11-22 00:22:09 +01:00
/*
 * Add [mem, end) to boot_mem_map with the given type, unless its start
 * already lies within a recorded region (in which case it is assumed to
 * be covered and is skipped).
 */
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}
2011-11-22 14:38:03 +00:00
2013-09-04 23:26:24 +05:30
#ifdef CONFIG_KEXEC
/* Total memory span between min_low_pfn and max_pfn, in bytes. */
static inline unsigned long long get_total_mem(void)
{
	return ((unsigned long long)(max_pfn - min_low_pfn)) << PAGE_SHIFT;
}
/*
 * Parse "crashkernel=" from the boot command line and, if the requested
 * region is valid and available, record it in crashk_res.
 */
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	/* Region must be inside RAM and clear of reserved areas. */
	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
/*
 * Claim the crashkernel region (if one was configured) under @res and
 * log the reservation.
 */
static void __init request_crashkernel(struct resource *res)
{
	int ret;

	/* start == end means mips_parse_crashkernel() configured nothing. */
	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
# else /* !defined(CONFIG_KEXEC) */
/* CONFIG_KEXEC disabled: crashkernel parsing is a no-op. */
static void __init mips_parse_crashkernel(void)
{
}

/* CONFIG_KEXEC disabled: nothing to claim from the resource tree. */
static void __init request_crashkernel(struct resource *res)
{
}
# endif /* !defined(CONFIG_KEXEC) */
2015-10-12 13:13:02 +02:00
/* Use the bootloader (firmware) arguments as the command line. */
#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
/* Use the command line embedded in the device tree blob. */
#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
/* Append the bootloader arguments after the DTB command line. */
#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
/* Append the bootloader arguments after the built-in command line. */
#define BUILTIN_EXTEND_WITH_PROM \
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
2015-10-12 13:13:02 +02:00
2013-02-12 19:41:47 +00:00
/*
 * arch_mem_init() - assemble the kernel command line, complete the boot
 * memory map, and perform all early (bootmem-time) memory reservations.
 *
 * @cmdline_p: out parameter; set to point at the final command line for
 *             the generic init code.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	/* The built-in command line overrides every other source. */
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	/* Otherwise combine firmware/DTB/built-in per the policy macros. */
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		/* Separate the two sources with a space when needed. */
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	/* "mem=" on the command line replaced the detected map. */
	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	/* Reserve the FDT itself and regions it declares before bootmem. */
	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();

#ifdef CONFIG_PROC_VMCORE
	/* Keep the kdump ELF core header away from the allocator. */
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	/* Protect the crashkernel region from regular allocations. */
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif

	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			       __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}
2006-08-11 17:51:51 +02:00
/*
 * Register the RAM regions from boot_mem_map in the resource tree, then
 * nest the kernel's code/data/bss and crashkernel windows inside
 * whichever region happens to contain them.
 */
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = start + boot_mem_map.map[i].size - 1;
		struct resource *res;

		/* Highmem is not represented in the resource tree. */
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}
2014-06-26 11:41:25 +08:00
#ifdef CONFIG_SMP
/*
 * Clamp the possible-CPU count to nr_cpu_ids: CPUs [0, possible) are
 * marked possible, everything up to NR_CPUS is marked not possible.
 */
static void __init prefill_possible_map(void)
{
	int possible = num_possible_cpus();
	int cpu;

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (cpu = 0; cpu < possible; cpu++)
		set_cpu_possible(cpu, true);
	for (; cpu < NR_CPUS; cpu++)
		set_cpu_possible(cpu, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif
2005-04-16 15:20:36 -07:00
/*
 * setup_arch() - MIPS architecture-specific boot-time initialisation.
 *
 * Ordering matters: CPU and Coherence Manager probing and firmware init
 * must run before memory setup, which must run before SMP and paging
 * initialisation.
 */
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	/* Bring up whatever early console is available for boot messages. */
	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	/* Command line, memory map and early reservations. */
	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}
2007-01-24 01:21:05 +09:00
/* Per-CPU kernel stack pointers, referenced by low-level entry code. */
unsigned long kernelsp[NR_CPUS];
/* Raw firmware/bootloader argument registers captured at kernel entry. */
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
/* Address of the device tree blob passed in by the firmware, if any. */
unsigned long fw_passed_dtb;
#endif
2007-06-30 00:55:48 +09:00
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;

/* Create the top-level "mips" debugfs directory used by arch code. */
static int __init debugfs_mips(void)
{
	struct dentry *dir = debugfs_create_dir("mips", NULL);

	if (!dir)
		return -ENOMEM;

	mips_debugfs_dir = dir;
	return 0;
}
arch_initcall(debugfs_mips);
#endif
2018-06-15 13:08:45 +02:00
#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */

/* "coherentio": force hardware-maintained DMA coherency on. */
static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;

	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

/* "nocoherentio": force software-maintained DMA coherency. */
static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;

	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif