2005-04-16 15:20:36 -07:00
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
2011-07-22 10:58:34 -04:00
# include <linux/export.h>
2005-04-16 15:20:36 -07:00
# include <linux/kernel.h>
# include <linux/stddef.h>
# include <linux/ioport.h>
# include <linux/delay.h>
# include <linux/utsname.h>
# include <linux/initrd.h>
# include <linux/console.h>
# include <linux/bootmem.h>
# include <linux/seq_file.h>
2006-07-10 04:44:13 -07:00
# include <linux/screen_info.h>
ARM: default machine descriptor for multiplatform
Since we now have default implementations for init_time and init_irq,
the init_machine callback is the only one that is not yet optional,
but since simple DT based platforms all have the same
of_platform_populate function call in there, we can consolidate them
as well, and then actually boot with a completely empty machine_desc.
Unfortunately we cannot just default to an empty init_machine: we
cannot call of_platform_populate before init_machine because that
does not work in case of auxdata, and we cannot call it after
init_machine either because the machine might need to run code
after adding the devices.
To take the final step, this adds support for booting without defining
any machine_desc whatsoever.
For the case that CONFIG_MULTIPLATFORM is enabled, it adds a
global machine descriptor that never matches any machine but is
used as a fallback if nothing else matches. We assume that without
CONFIG_MULTIPLATFORM, we only want to boot on the systems that the kernel
is built for, so we still retain the build-time warning for missing
machine descriptors and the run-time warning when the platform does not
match in that case.
If we run on a multiplatform kernel and the machine provides a fully
populated device tree, we attempt to keep booting, hoping that no
machine-specific callbacks are necessary.
Finally, this also removes the misguided "select ARCH_VEXPRESS" that
was only added to avoid a build error for allnoconfig kernels.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Cc: "Russell King - ARM Linux" <linux@arm.linux.org.uk>
Cc: Rob Herring <robherring2@gmail.com>
2013-01-31 17:51:18 +00:00
# include <linux/of_platform.h>
2005-04-16 15:20:36 -07:00
# include <linux/init.h>
2010-05-10 09:20:22 +01:00
# include <linux/kexec.h>
2011-04-28 14:27:21 -06:00
# include <linux/of_fdt.h>
2005-04-16 15:20:36 -07:00
# include <linux/cpu.h>
# include <linux/interrupt.h>
2006-02-16 11:08:09 +00:00
# include <linux/smp.h>
2010-01-10 17:23:29 +00:00
# include <linux/proc_fs.h>
2010-07-09 16:27:52 +01:00
# include <linux/memblock.h>
2011-08-19 17:58:35 +01:00
# include <linux/bug.h>
# include <linux/compiler.h>
2011-08-25 19:10:29 -04:00
# include <linux/sort.h>
2005-04-16 15:20:36 -07:00
2009-07-24 12:32:54 +01:00
# include <asm/unified.h>
2012-03-28 18:30:01 +01:00
# include <asm/cp15.h>
2005-04-16 15:20:36 -07:00
# include <asm/cpu.h>
2008-08-10 18:08:10 +01:00
# include <asm/cputype.h>
2005-04-16 15:20:36 -07:00
# include <asm/elf.h>
# include <asm/procinfo.h>
2008-12-01 11:53:07 +00:00
# include <asm/sections.h>
2005-04-16 15:20:36 -07:00
# include <asm/setup.h>
2010-09-04 10:47:48 +01:00
# include <asm/smp_plat.h>
2005-04-16 15:20:36 -07:00
# include <asm/mach-types.h>
# include <asm/cacheflush.h>
2008-08-10 18:10:19 +01:00
# include <asm/cachetype.h>
2005-04-16 15:20:36 -07:00
# include <asm/tlbflush.h>
2011-04-28 14:27:21 -06:00
# include <asm/prom.h>
2005-04-16 15:20:36 -07:00
# include <asm/mach/arch.h>
# include <asm/mach/irq.h>
# include <asm/mach/time.h>
2012-03-28 18:30:01 +01:00
# include <asm/system_info.h>
# include <asm/system_misc.h>
2008-02-20 13:33:40 -06:00
# include <asm/traps.h>
2009-02-16 11:41:36 +01:00
# include <asm/unwind.h>
2011-12-08 10:22:06 -08:00
# include <asm/memblock.h>
2012-02-17 16:54:28 +00:00
# include <asm/virt.h>
2005-04-16 15:20:36 -07:00
2008-01-02 00:56:46 +01:00
# include "atags.h"
2006-03-15 23:17:30 +00:00
2005-04-16 15:20:36 -07:00
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
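/*
 * Usage sketch (option value assumed for illustration): passing "fpe=nwfpe"
 * on the kernel command line copies the first 8 bytes of the value into
 * fpe_type, which the floating-point emulator code can later compare
 * against its own name before registering itself.
 */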
2008-10-06 13:24:40 -04:00
extern void paging_init(struct machine_desc *desc);
2011-07-05 19:58:29 +01:00
extern void sanity_check_meminfo(void);
2005-04-16 15:20:36 -07:00
extern void reboot_setup(char *str);
2011-12-29 13:09:51 +01:00
extern void setup_dma_zone(struct machine_desc *desc);
2005-04-16 15:20:36 -07:00
unsigned int processor_id;
2007-12-18 03:53:27 +01:00
EXPORT_SYMBOL(processor_id);
2010-12-04 17:45:55 +00:00
unsigned int __machine_arch_type __read_mostly;
2005-04-16 15:20:36 -07:00
EXPORT_SYMBOL(__machine_arch_type);
2010-12-04 17:45:55 +00:00
unsigned int cacheid __read_mostly;
2008-09-25 15:35:28 +01:00
EXPORT_SYMBOL(cacheid);
2005-04-16 15:20:36 -07:00
2007-05-31 22:02:22 +01:00
unsigned int __atags_pointer __initdata;
2005-04-16 15:20:36 -07:00
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);
unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);
unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);
2010-12-04 17:45:55 +00:00
unsigned int elf_hwcap __read_mostly;
2005-04-16 15:20:36 -07:00
EXPORT_SYMBOL(elf_hwcap);
#ifdef MULTI_CPU
2010-12-04 17:45:55 +00:00
struct processor processor __read_mostly;
2005-04-16 15:20:36 -07:00
#endif
#ifdef MULTI_TLB
2010-12-04 17:45:55 +00:00
struct cpu_tlb_fns cpu_tlb __read_mostly;
2005-04-16 15:20:36 -07:00
#endif
#ifdef MULTI_USER
2010-12-04 17:45:55 +00:00
struct cpu_user_fns cpu_user __read_mostly;
2005-04-16 15:20:36 -07:00
#endif
#ifdef MULTI_CACHE
2010-12-04 17:45:55 +00:00
struct cpu_cache_fns cpu_cache __read_mostly;
2005-04-16 15:20:36 -07:00
#endif
2007-02-05 14:48:08 +01:00
#ifdef CONFIG_OUTER_CACHE
2010-12-04 17:45:55 +00:00
struct outer_cache_fns outer_cache __read_mostly;
2010-02-16 07:57:43 +01:00
EXPORT_SYMBOL(outer_cache);
2007-02-05 14:48:08 +01:00
#endif
2005-04-16 15:20:36 -07:00
2011-08-19 17:58:35 +01:00
/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
2005-05-31 22:22:32 +01:00
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];
2005-04-16 15:20:36 -07:00
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
2010-01-27 01:13:31 +01:00
static char __initdata cmd_line[COMMAND_LINE_SIZE];
2010-12-20 10:18:36 +00:00
struct machine_desc *machine_desc __initdata;
2005-04-16 15:20:36 -07:00
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
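/*
 * Worked example of the ENDIANNESS trick above: the union overlays the
 * bytes 'l', '?', '?', 'b' on an unsigned long.  Casting the long to char
 * keeps only its least significant byte; on a little-endian CPU that byte
 * is c[0] ('l'), on a big-endian CPU it is c[3] ('b').  The result is
 * appended to the machine and ELF platform names in setup_processor().
 */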
DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
2006-06-12 14:47:06 -07:00
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
2012-01-18 01:57:21 +01:00
		.name = "Kernel code",
2006-06-12 14:47:06 -07:00
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
2005-04-16 15:20:36 -07:00
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]
static struct resource io_res[] = {
2006-06-12 14:47:06 -07:00
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
2005-04-16 15:20:36 -07:00
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
2006-01-12 16:28:16 +00:00
	"7",
2005-04-16 15:20:36 -07:00
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
2011-08-19 17:58:35 +01:00
static int __get_cpu_architecture(void)
2005-04-16 15:20:36 -07:00
{
	int cpu_arch;
2008-08-10 18:08:10 +01:00
	if ((read_cpuid_id() & 0x0008f000) == 0) {
2005-04-16 15:20:36 -07:00
		cpu_arch = CPU_ARCH_UNKNOWN;
2008-08-10 18:08:10 +01:00
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
2005-04-16 15:20:36 -07:00
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
2008-08-10 18:08:10 +01:00
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
2007-09-25 16:49:45 +01:00
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
2011-02-15 18:06:57 +01:00
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
2007-09-25 16:49:45 +01:00
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;
2005-04-16 15:20:36 -07:00
	return cpu_arch;
}
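/*
 * Worked example (MIDR value assumed for illustration): a Cortex-A9 reports
 * MIDR 0x413fc090.  Bits [19:16] are 0xf, so the "revised CPUID" branch
 * above reads ID_MMFR0; its VMSA field (bits [3:0]) is >= 3 on that core,
 * so __get_cpu_architecture() returns CPU_ARCH_ARMv7.
 */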
2011-08-19 17:58:35 +01:00
int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}
2010-09-13 16:18:30 +01:00
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;
2011-08-23 22:22:11 +01:00
	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;
2010-09-13 16:18:30 +01:00
	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
2010-10-06 11:07:28 +01:00
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
2010-09-13 16:18:30 +01:00
		    : "r" (1));
2010-10-06 11:07:28 +01:00
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
2010-09-13 16:18:30 +01:00
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
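/*
 * Worked example for the CCSIDR decode above (cache geometry assumed for
 * illustration): a 32KB, 4-way I-cache with 32-byte lines reports a
 * LineSize field of 1 and a NumSets field of 255, so
 * line_size = 4 << (1 + 2) = 32 and num_sets = 256.  32 * 256 = 8KB is
 * larger than a 4KB page, so virtual aliases of the same physical line can
 * land in different sets and the I-cache is treated as aliasing.
 */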
2008-09-25 15:35:28 +01:00
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();
2009-03-03 11:44:12 +01:00
	if (arch >= CPU_ARCH_ARMv6) {
2013-01-30 17:38:21 +01:00
		unsigned int cachetype = read_cpuid_cachetype();
2009-03-03 11:44:12 +01:00
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
2011-08-03 12:37:04 +01:00
			arch = CPU_ARCH_ARMv7;
2009-03-03 11:44:12 +01:00
			cacheid = CACHEID_VIPT_NONALIASING;
2011-08-23 22:22:11 +01:00
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
2009-03-03 11:44:12 +01:00
				cacheid |= CACHEID_ASID_TAGGED;
2011-08-23 22:22:11 +01:00
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
2010-09-13 16:18:30 +01:00
		} else {
2011-08-03 12:37:04 +01:00
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
2010-09-13 16:18:30 +01:00
		}
2011-08-03 12:37:04 +01:00
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
2008-09-25 15:35:28 +01:00
	} else {
		cacheid = CACHEID_VIVT;
	}
2008-09-25 15:39:20 +01:00
	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
2011-08-23 22:22:11 +01:00
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
2008-09-25 15:39:20 +01:00
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
2010-09-13 16:18:30 +01:00
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
2011-08-23 22:22:11 +01:00
		icache_is_pipt() ? "PIPT" :
2008-09-25 15:39:20 +01:00
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
2008-09-25 15:35:28 +01:00
}
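/*
 * Note on the cachetype decode above: bits [31:29] of the Cache Type
 * Register select the register format, and 0x4 means the ARMv7 layout,
 * where bits [15:14] encode the L1 I-cache policy (0b01 = ASID-tagged
 * VIVT, 0b11 = PIPT).  The older ARMv6 layout is interpreted via bit 23,
 * which the code treats as "data cache may alias and needs page
 * colouring", hence CACHEID_VIPT_ALIASING.
 */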
2005-04-16 15:20:36 -07:00
/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
2006-02-24 21:04:56 +00:00
extern struct proc_info_list *lookup_processor_type(unsigned int);
2011-01-12 17:50:42 +00:00
2011-04-28 14:27:21 -06:00
void __init early_print(const char *str, ...)
2011-01-12 17:50:42 +00:00
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
2013-03-18 19:44:15 +01:00
static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-state divide implies Thumb-state divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
}
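/*
 * Background for the decode above: ID_ISAR0 bits [27:24] describe hardware
 * integer divide support.  A value of 1 means SDIV/UDIV exist in the Thumb
 * instruction set only (HWCAP_IDIVT), while 2 means they are available in
 * both ARM and Thumb state, so both HWCAP_IDIVA and HWCAP_IDIVT are
 * advertised to userspace via AT_HWCAP.
 */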
2010-07-05 14:53:10 +01:00
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}
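/*
 * Worked example (MIDR value assumed for illustration): an ARM1136 r0p2
 * reports implementer 0x41 (ARM), architecture nibble 0x7 and part number
 * 0xb36, with a variant field ((id >> 20) & 3) of 0.  Variant 0 means
 * r0pX, which predates the hardware TLS register, so HWCAP_TLS is cleared;
 * r1p0 and later keep it.
 */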
2005-05-31 22:22:32 +01:00
/*
 * cpu_init - initialise one CPU.
 *
2008-09-25 14:45:02 +01:00
 * cpu_init sets up the per-CPU stacks.
2005-05-31 22:22:32 +01:00
 */
2013-04-25 14:40:22 +01:00
void notrace cpu_init(void)
2005-05-31 22:22:32 +01:00
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}
2012-11-29 20:39:54 +01:00
	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));
2011-06-21 18:57:31 +01:00
	cpu_proc_init();
2009-07-24 12:32:54 +01:00
	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif
2005-05-31 22:22:32 +01:00
	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
2009-07-24 12:32:54 +01:00
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
2005-05-31 22:22:32 +01:00
	"msr	cpsr_c, %3\n\t"
2009-07-24 12:32:54 +01:00
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
2005-05-31 22:22:32 +01:00
	"msr	cpsr_c, %5\n\t"
2009-07-24 12:32:54 +01:00
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
2005-05-31 22:22:32 +01:00
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
2009-07-24 12:32:54 +01:00
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
2005-05-31 22:22:32 +01:00
	      "I" (offsetof(struct stack, irq[0])),
2009-07-24 12:32:54 +01:00
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
2005-05-31 22:22:32 +01:00
	      "I" (offsetof(struct stack, abt[0])),
2009-07-24 12:32:54 +01:00
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
2005-05-31 22:22:32 +01:00
	      "I" (offsetof(struct stack, und[0])),
2009-07-24 12:32:54 +01:00
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
2005-06-29 15:34:39 +01:00
	    : "r14");
2005-05-31 22:22:32 +01:00
}
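/*
 * What the inline asm above does: for each of IRQ, abort and undefined
 * mode in turn it writes CPSR to switch into that mode with IRQs/FIQs
 * masked, points that mode's banked SP at the matching three-word entry in
 * this CPU's struct stack, and finally switches back to SVC mode.  The
 * exception stacks only need to hold the few registers the vector entry
 * code spills before it moves over to the SVC stack.
 */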
2012-01-20 12:01:12 +01:00
int __cpu_logical_map[NR_CPUS];

void __init smp_setup_processor_id(void)
{
	int i;
2012-11-08 18:05:56 +00:00
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
2012-01-20 12:01:12 +01:00
	cpu_logical_map(0) = cpu;
2012-11-08 18:05:56 +00:00
	for (i = 1; i < nr_cpu_ids; ++i)
2012-01-20 12:01:12 +01:00
		cpu_logical_map(i) = i == cpu ? 0 : i;
2012-11-08 18:05:56 +00:00
	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
2012-01-20 12:01:12 +01:00
}
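/*
 * Worked example (boot CPU assumed for illustration): if the kernel is
 * entered on the core whose MPIDR affinity level 0 is 2, the map becomes
 * logical 0 -> physical 2 and logical 2 -> physical 0, with every other
 * logical CPU keeping its own index.  This guarantees that the CPU we
 * booted on is always logical CPU 0, whatever its hardware ID.
 */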
2011-06-21 18:57:31 +01:00
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
2011-08-19 17:58:35 +01:00
	__cpu_architecture = __get_cpu_architecture();
2011-06-21 18:57:31 +01:00
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);
2011-11-11 11:35:58 +01:00
	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
2011-06-21 18:57:31 +01:00
	elf_hwcap = list->elf_hwcap;
2013-03-18 19:44:15 +01:00
	cpuid_init_hwcaps();
2011-06-21 18:57:31 +01:00
#ifndef CONFIG_ARM_THUMB
2013-03-18 19:44:14 +01:00
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
2011-06-21 18:57:31 +01:00
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}
2011-04-28 14:27:21 -06:00
void __init dump_machine_table(void)
2005-04-16 15:20:36 -07:00
{
2011-02-21 07:00:32 +01:00
	struct machine_desc *p;
2005-04-16 15:20:36 -07:00
2011-04-28 14:27:21 -06:00
	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
2011-02-21 07:00:32 +01:00
		early_print("%08x\t%s\n", p->nr, p->name);
2005-04-16 15:20:36 -07:00
2011-02-21 07:00:32 +01:00
	early_print("\nPlease check your kernel config and/or bootloader.\n");
2005-04-16 15:20:36 -07:00
2011-02-21 07:00:32 +01:00
	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
2005-04-16 15:20:36 -07:00
}
2012-07-12 23:57:35 +01:00
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
2005-06-22 21:43:10 +01:00
{
2008-10-06 13:24:40 -04:00
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
2011-02-15 14:31:37 +01:00
			"ignoring memory at 0x%08llx\n", (long long)start);
2008-10-06 13:24:40 -04:00
		return -EINVAL;
	}
2006-11-30 20:44:49 +00:00
2005-06-22 21:43:10 +01:00
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
2006-11-30 20:44:49 +00:00
	bank->start = PAGE_ALIGN(start);
2012-04-12 17:15:08 +01:00
2013-04-03 12:24:45 +01:00
#ifndef CONFIG_ARM_LPAE
2012-04-12 17:15:08 +01:00
	if (bank->start + size < bank->start) {
		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
			"32-bit physical address space\n", (long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - bank->start;
	}
#endif
2012-07-12 23:57:35 +01:00
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
2008-10-06 13:24:40 -04:00
	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
2010-05-07 17:40:33 +01:00
	if (bank->size == 0)
2008-10-06 13:24:40 -04:00
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
2005-06-22 21:43:10 +01:00
}
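/*
 * Worked example of the rounding above (addresses assumed for
 * illustration): with start = 0x80000800 and size = 0x10000, the bank
 * start is rounded up to 0x80001000, the size shrinks by the 0x800 offset
 * and is then truncated to whole pages, giving bank->size = 0xf000.  The
 * registered bank therefore covers 0x80001000-0x8000ffff, i.e. only fully
 * usable pages are handed to the memory allocator.
 */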
2005-04-16 15:20:36 -07:00
/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
2010-01-11 23:17:34 +01:00
static int __init early_mem(char *p)
2005-04-16 15:20:36 -07:00
{
	static int usermem __initdata = 0;
2012-07-12 23:57:35 +01:00
	phys_addr_t size;
2011-02-15 12:44:10 +01:00
	phys_addr_t start;
2010-01-11 23:17:34 +01:00
	char *endp;
2005-04-16 15:20:36 -07:00
	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
2010-01-11 23:17:34 +01:00
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);
2005-04-16 15:20:36 -07:00
2006-04-20 21:41:18 +01:00
	arm_add_memory(start, size);
2005-04-16 15:20:36 -07:00
2010-01-11 23:17:34 +01:00
	return 0;
2005-04-16 15:20:36 -07:00
}
2010-01-11 23:17:34 +01:00
early_param("mem", early_mem);
2005-04-16 15:20:36 -07:00
2011-01-14 23:05:14 +01:00
static void __init request_standard_resources(struct machine_desc *mdesc)
2005-04-16 15:20:36 -07:00
{
2011-01-14 23:05:14 +01:00
	struct memblock_region *region;
2005-04-16 15:20:36 -07:00
	struct resource *res;
2008-12-01 11:53:07 +00:00
	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
2010-10-01 14:12:22 +01:00
	kernel_data.start   = virt_to_phys(_sdata);
2008-12-01 11:53:07 +00:00
	kernel_data.end     = virt_to_phys(_end - 1);
2005-04-16 15:20:36 -07:00
2011-01-14 23:05:14 +01:00
	for_each_memblock(memory, region) {
2005-04-16 15:20:36 -07:00
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
2011-01-14 23:05:14 +01:00
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
2005-04-16 15:20:36 -07:00
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
2011-05-04 17:07:55 +01:00
#endif
2005-04-16 15:20:36 -07:00
static int __init customize_machine(void)
{
2013-01-31 17:51:18 +00:00
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
2010-12-20 10:18:36 +00:00
	if (machine_desc->init_machine)
		machine_desc->init_machine();
2013-01-31 17:51:18 +00:00
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
2005-04-16 15:20:36 -07:00
	return 0;
}
arch_initcall(customize_machine);
2012-04-25 22:24:44 +08:00
static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);
2010-05-10 09:20:22 +01:00
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */
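/*
 * Usage sketch (values assumed for illustration): booting with
 * "crashkernel=64M@32M" makes parse_crashkernel() return a crash_size of
 * 64MB and a crash_base of 32MB; that window is then reserved via
 * reserve_bootmem() and published as the crashk_res resource so that kexec
 * can load the dump-capture kernel into it.
 */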
2011-08-25 19:10:29 -04:00
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
2011-04-28 14:27:21 -06:00
2012-02-17 16:54:28 +00:00
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}
2011-04-28 14:27:21 -06:00
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	setup_processor();
2011-04-28 14:27:21 -06:00
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
2012-12-12 08:32:11 +01:00
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
2011-04-28 14:27:21 -06:00
	machine_desc = mdesc;
	machine_name = mdesc->name;
2011-12-29 13:09:51 +01:00
	setup_dma_zone(mdesc);
2011-11-01 14:27:33 +00:00
	if (mdesc->restart_mode)
		reboot_setup(&mdesc->restart_mode);
2011-04-28 14:27:21 -06:00
2008-12-01 11:53:07 +00:00
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk        = (unsigned long) _end;
2005-04-16 15:20:36 -07:00
2010-01-27 01:13:31 +01:00
	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;
2010-01-11 23:17:34 +01:00
	parse_early_param();
2011-08-25 19:10:29 -04:00
	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
2011-07-05 19:58:29 +01:00
	sanity_check_meminfo();
2010-05-22 19:47:18 +01:00
	arm_memblock_init(&meminfo, mdesc);
2010-07-09 16:27:52 +01:00
2008-10-06 13:24:40 -04:00
	paging_init(mdesc);
2011-01-14 23:05:14 +01:00
	request_standard_resources(mdesc);
2005-04-16 15:20:36 -07:00
2011-11-04 15:05:24 +00:00
	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;
2011-04-28 14:27:21 -06:00
	unflatten_device_tree();
2011-12-14 16:01:24 +00:00
	arm_dt_init_cpu_maps();
2006-02-16 11:08:09 +00:00
#ifdef CONFIG_SMP
2011-09-08 09:06:10 +01:00
	if (is_smp()) {
		smp_set_ops(mdesc->smp);
2010-09-04 10:47:48 +01:00
		smp_init_cpus();
2011-09-08 09:06:10 +01:00
	}
2006-02-16 11:08:09 +00:00
#endif
2012-02-17 16:54:28 +00:00
	if (!is_smp())
		hyp_mode_check();
2010-05-10 09:20:22 +01:00
	reserve_crashkernel();
2006-02-16 11:08:09 +00:00
2010-12-13 09:42:34 +01:00
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif
2005-04-16 15:20:36 -07:00
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
2010-12-16 13:49:34 +00:00
	if (mdesc->init_early)
		mdesc->init_early();
2005-04-16 15:20:36 -07:00
}
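/*
 * Boot-description precedence in setup_arch() above: the pointer the
 * bootloader passed in r2 (saved as __atags_pointer) is first tried as a
 * flattened device tree via setup_machine_fdt(); only if that fails does
 * the kernel fall back to the legacy ATAG list with setup_machine_tags().
 * From then on both paths are handled identically through the selected
 * machine_desc.
 */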
static int __init topology_init(void)
{
	int cpu;
2007-03-13 09:54:21 +00:00
	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}
2005-04-16 15:20:36 -07:00
	return 0;
}
subsys_initcall(topology_init);
2010-01-10 17:23:29 +00:00
#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif
2005-04-16 15:20:36 -07:00
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
2006-10-27 05:13:19 +01:00
	"iwmmxt",
2006-12-18 00:59:10 +01:00
	"crunch",
2008-11-06 13:23:06 +00:00
	"thumbee",
2008-11-06 13:23:07 +00:00
	"neon",
2009-02-11 13:13:56 +01:00
	"vfpv3",
	"vfpv3d16",
2011-06-03 14:15:22 +01:00
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
2005-04-16 15:20:36 -07:00
	NULL
};
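/*
 * The index of each name in hwcap_str[] is its bit number in elf_hwcap;
 * c_show() below walks the array and prints every name whose bit is set,
 * producing the "Features" line of /proc/cpuinfo.  For example, "neon"
 * sits at index 12, matching HWCAP_NEON == (1 << 12).
 */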
static int c_show(struct seq_file *m, void *v)
{
2012-09-10 18:55:21 +01:00
	int i, j;
	u32 cpuid;
2005-04-16 15:20:36 -07:00
	for_each_online_cpu(i) {
2005-11-06 21:41:08 +00:00
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
2012-09-10 18:55:21 +01:00
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
2005-04-16 15:20:36 -07:00
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
2012-09-10 18:55:21 +01:00
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
2005-04-16 15:20:36 -07:00
#endif
2012-09-10 18:55:21 +01:00
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");
2005-04-16 15:20:36 -07:00
2012-09-10 18:55:21 +01:00
		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);
2005-04-16 15:20:36 -07:00
2012-09-10 18:55:21 +01:00
		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);
2005-04-16 15:20:36 -07:00
2012-09-10 18:55:21 +01:00
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
2005-04-16 15:20:36 -07:00
		} else {
2012-09-10 18:55:21 +01:00
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
2005-04-16 15:20:36 -07:00
		}
2012-09-10 18:55:21 +01:00
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
2005-04-16 15:20:36 -07:00
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}
2008-01-22 20:41:07 +01:00
const struct seq_operations cpuinfo_op = {
2005-04-16 15:20:36 -07:00
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
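/*
 * How the seq_operations above drive /proc/cpuinfo: c_start() hands the
 * seq_file core a single dummy token for position 0 and NULL afterwards,
 * and c_next() always returns NULL, so c_show() runs exactly once per read
 * and emits the entries for every online CPU itself in its own loop.
 */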