6c3ac11343
Notable changes: - Enable THREAD_INFO_IN_TASK to move thread_info off the stack. - A big series from Christoph reworking our DMA code to use more of the generic infrastructure, as he said: "This series switches the powerpc port to use the generic swiotlb and noncoherent dma ops, and to use more generic code for the coherent direct mapping, as well as removing a lot of dead code." - Increase our vmalloc space to 512T with the Hash MMU on modern CPUs, allowing us to support machines with larger amounts of total RAM or distance between nodes. - Two series from Christophe, one to optimise TLB miss handlers on 6xx, and another to optimise the way STRICT_KERNEL_RWX is implemented on some 32-bit CPUs. - Support for KCOV coverage instrumentation which means we can run syzkaller and discover even more bugs in our code. And as always many clean-ups, reworks and minor fixes etc. Thanks to: Alan Modra, Alexey Kardashevskiy, Alistair Popple, Andrea Arcangeli, Andrew Donnellan, Aneesh Kumar K.V, Aravinda Prasad, Balbir Singh, Brajeswar Ghosh, Breno Leitao, Christian Lamparter, Christian Zigotzky, Christophe Leroy, Christoph Hellwig, Corentin Labbe, Daniel Axtens, David Gibson, Diana Craciun, Firoz Khan, Gustavo A. R. Silva, Igor Stoppa, Joe Lawrence, Joel Stanley, Jonathan Neuschäfer, Jordan Niethe, Laurent Dufour, Madhavan Srinivasan, Mahesh Salgaonkar, Mark Cave-Ayland, Masahiro Yamada, Mathieu Malaterre, Matteo Croce, Meelis Roos, Michael W. Bringmann, Nathan Chancellor, Nathan Fontenot, Nicholas Piggin, Nick Desaulniers, Nicolai Stange, Oliver O'Halloran, Paul Mackerras, Peter Xu, PrasannaKumar Muralidharan, Qian Cai, Rashmica Gupta, Reza Arbab, Robert P. J. Day, Russell Currey, Sabyasachi Gupta, Sam Bobroff, Sandipan Das, Sergey Senozhatsky, Souptick Joarder, Stewart Smith, Tyrel Datwyler, Vaibhav Jain, YueHaibing. 
-----BEGIN PGP SIGNATURE----- iQIcBAABAgAGBQJcgRJlAAoJEFHr6jzI4aWAL9oP+gPlrZgyaAg/51lmubLtlbtk QuGU8EiuJZoJD1OHrMPtppBOY7rQZOxJe58AoPig8wTvs+j/TxJ25fmiZncnf5U2 PC8QAjbj0UmQHgy+K30sUeOnDg9tdkHKHJ5/ecjJcvykkqsjyMnV7biFQ1cOA0HT LflXHEEtiG9P9u7jZoAhtnfpgn1/l9mhTYMe26J1fqvC0164qMDFaXDTQXyDfyvG gmuqccGMawSk7IdagmQxwXtwyfwOnarmGn+n31XKRejApGZ/pjiEA23JOJOaJcia m76Jy3roao6sEtCUNpBFXEtwOy9POy3OiGy6yg/9896tDMvG84OuO6ltV1nFGawL PmwE+ug63L4g/HWxZyAeb26T2oTTp/YIaKQPtsq4d286pvg/qr2KPNzFoAEhmJqU yLrebv276pVeiLpLmCLPvcPj9t76vWKZaUm0FoE+zUDg7Rl7Alow8A/c4tdjOI6y QwpbCiYseyiJ32lCZZdbN7Cy6+iM6vb3i1oNKc8MVqhBGTwLJnTU0ruPBSvCaRvD NoQWO1RWpNu/BuivuLEKS9q3AoxenGwiqowxGhdVmI3Oc9jGWcEYlduR00VDYPVp /RCfwtTY5NyC++h5cnbz8aLJ1hBXG5m79CXfprV+zPWeiLPCaMT6w9Y5QUS2wqA+ EZ734NknDJOjaHc4cGdZ =Z9bb -----END PGP SIGNATURE----- Merge tag 'powerpc-5.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux Pull powerpc updates from Michael Ellerman: "Notable changes: - Enable THREAD_INFO_IN_TASK to move thread_info off the stack. - A big series from Christoph reworking our DMA code to use more of the generic infrastructure, as he said: "This series switches the powerpc port to use the generic swiotlb and noncoherent dma ops, and to use more generic code for the coherent direct mapping, as well as removing a lot of dead code." - Increase our vmalloc space to 512T with the Hash MMU on modern CPUs, allowing us to support machines with larger amounts of total RAM or distance between nodes. - Two series from Christophe, one to optimise TLB miss handlers on 6xx, and another to optimise the way STRICT_KERNEL_RWX is implemented on some 32-bit CPUs. - Support for KCOV coverage instrumentation which means we can run syzkaller and discover even more bugs in our code. And as always many clean-ups, reworks and minor fixes etc. 
Thanks to: Alan Modra, Alexey Kardashevskiy, Alistair Popple, Andrea Arcangeli, Andrew Donnellan, Aneesh Kumar K.V, Aravinda Prasad, Balbir Singh, Brajeswar Ghosh, Breno Leitao, Christian Lamparter, Christian Zigotzky, Christophe Leroy, Christoph Hellwig, Corentin Labbe, Daniel Axtens, David Gibson, Diana Craciun, Firoz Khan, Gustavo A. R. Silva, Igor Stoppa, Joe Lawrence, Joel Stanley, Jonathan Neuschäfer, Jordan Niethe, Laurent Dufour, Madhavan Srinivasan, Mahesh Salgaonkar, Mark Cave-Ayland, Masahiro Yamada, Mathieu Malaterre, Matteo Croce, Meelis Roos, Michael W. Bringmann, Nathan Chancellor, Nathan Fontenot, Nicholas Piggin, Nick Desaulniers, Nicolai Stange, Oliver O'Halloran, Paul Mackerras, Peter Xu, PrasannaKumar Muralidharan, Qian Cai, Rashmica Gupta, Reza Arbab, Robert P. J. Day, Russell Currey, Sabyasachi Gupta, Sam Bobroff, Sandipan Das, Sergey Senozhatsky, Souptick Joarder, Stewart Smith, Tyrel Datwyler, Vaibhav Jain, YueHaibing" * tag 'powerpc-5.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (200 commits) powerpc/32: Clear on-stack exception marker upon exception return powerpc: Remove export of save_stack_trace_tsk_reliable() powerpc/mm: fix "section_base" set but not used powerpc/mm: Fix "sz" set but not used warning powerpc/mm: Check secondary hash page table powerpc: remove nargs from __SYSCALL powerpc/64s: Fix unrelocated interrupt trampoline address test powerpc/powernv/ioda: Fix locked_vm counting for memory used by IOMMU tables powerpc/fsl: Fix the flush of branch predictor. 
powerpc/powernv: Make opal log only readable by root powerpc/xmon: Fix opcode being uninitialized in print_insn_powerpc powerpc/powernv: move OPAL call wrapper tracing and interrupt handling to C powerpc/64s: Fix data interrupts vs d-side MCE reentrancy powerpc/64s: Prepare to handle data interrupts vs d-side MCE reentrancy powerpc/64s: system reset interrupt preserve HSRRs powerpc/64s: Fix HV NMI vs HV interrupt recoverability test powerpc/mm/hash: Handle mmap_min_addr correctly in get_unmapped_area topdown search powerpc/hugetlb: Handle mmap_min_addr correctly in get_unmapped_area callback selftests/powerpc: Remove duplicate header powerpc sstep: Add support for modsd, modud instructions ...
239 lines
5.7 KiB
C
239 lines
5.7 KiB
C
/*
|
|
* Common prep/pmac/chrp boot and setup code.
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/string.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/init.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/reboot.h>
|
|
#include <linux/delay.h>
|
|
#include <linux/initrd.h>
|
|
#include <linux/tty.h>
|
|
#include <linux/seq_file.h>
|
|
#include <linux/root_dev.h>
|
|
#include <linux/cpu.h>
|
|
#include <linux/console.h>
|
|
#include <linux/memblock.h>
|
|
#include <linux/export.h>
|
|
#include <linux/nvram.h>
|
|
|
|
#include <asm/io.h>
|
|
#include <asm/prom.h>
|
|
#include <asm/processor.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/setup.h>
|
|
#include <asm/smp.h>
|
|
#include <asm/elf.h>
|
|
#include <asm/cputable.h>
|
|
#include <asm/bootx.h>
|
|
#include <asm/btext.h>
|
|
#include <asm/machdep.h>
|
|
#include <linux/uaccess.h>
|
|
#include <asm/pmac_feature.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/nvram.h>
|
|
#include <asm/xmon.h>
|
|
#include <asm/time.h>
|
|
#include <asm/serial.h>
|
|
#include <asm/udbg.h>
|
|
#include <asm/code-patching.h>
|
|
#include <asm/cpu_has_feature.h>
|
|
#include <asm/asm-prototypes.h>
|
|
#include <asm/kdump.h>
|
|
#include <asm/feature-fixups.h>
|
|
|
|
#include "setup.h"
|
|
|
|
/* Early-boot debug trace macro — compiled out (expands to nothing). */
#define DBG(fmt...)

/* BootX entry helper; NOTE(review): presumably defined in the PowerMac
 * BootX support code — confirm against the platform sources. */
extern void bootx_init(unsigned long r4, unsigned long phys);

/* Physical (hardware) ID of the CPU we booted on. */
int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);

/* Logical-to-hardware CPU number mapping table. */
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);

/* ISA DMA parameters; NOTE(review): appear to be filled in by platform
 * setup code elsewhere — not written in this file. */
unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;

EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
|
|
|
|
/*
 * We're called here very early in the boot.
 *
 * Note that the kernel may be running at an address which is different
 * from the address that it was linked at, so we must use RELOC/PTRRELOC
 * to access static data (including strings). -- paulus
 *
 * Returns the virtual address the kernel should continue running at
 * (KERNELBASE adjusted by the relocation offset).  The dt_ptr argument
 * is not used here; NOTE(review): presumably preserved for the caller /
 * later boot stages — confirm against the head_*.S entry code.
 */
notrace unsigned long __init early_init(unsigned long dt_ptr)
{
	unsigned long offset = reloc_offset();

	/* First zero the BSS -- use memset_io, some platforms don't have
	 * caches on yet */
	memset_io((void __iomem *)PTRRELOC(&__bss_start), 0,
			__bss_stop - __bss_start);

	/*
	 * Identify the CPU type and fix up code sections
	 * that depend on which cpu we have.
	 */
	identify_cpu(offset, mfspr(SPRN_PVR));

	/* Apply code patches for the CPU/firmware features just identified. */
	apply_feature_fixups();

	return KERNELBASE + offset;
}
|
|
|
|
|
|
/*
 * This is run before start_kernel(), the kernel has been relocated
 * and we are running with enough of the MMU enabled to have our
 * proper kernel virtual addresses
 *
 * We do the initial parsing of the flat device-tree and prepares
 * for the MMU to be fully initialized.
 */
notrace void __init machine_init(u64 dt_ptr)
{
	/* Site inside memset() patched below from an unconditional branch
	 * to a conditional one. */
	unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
	unsigned long insn;

	/* Configure static keys first, now that we're relocated. */
	setup_feature_keys();

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	/* NOP out the non-cacheable variant of memcpy's patch site. */
	patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);

	insn = create_cond_branch(addr, branch_target(addr), 0x820000);
	patch_instruction(addr, insn);	/* replace b by bne cr0 */

	/* Do some early initialization based on the flat device tree */
	early_init_devtree(__va(dt_ptr));

	/* Set up just enough of the MMU for the rest of early boot. */
	early_init_mmu();

	/* Prepare the kdump (crash kernel) trampoline, if configured. */
	setup_kdump_trampoline();
}
|
|
|
|
/* Checks "l2cr=xxxx" command-line option */
|
|
static int __init ppc_setup_l2cr(char *str)
|
|
{
|
|
if (cpu_has_feature(CPU_FTR_L2CR)) {
|
|
unsigned long val = simple_strtoul(str, NULL, 0);
|
|
printk(KERN_INFO "l2cr set to %lx\n", val);
|
|
_set_L2CR(0); /* force invalidate by disable cache */
|
|
_set_L2CR(val); /* and enable it */
|
|
}
|
|
return 1;
|
|
}
|
|
__setup("l2cr=", ppc_setup_l2cr);
|
|
|
|
/* Checks "l3cr=xxxx" command-line option */
|
|
static int __init ppc_setup_l3cr(char *str)
|
|
{
|
|
if (cpu_has_feature(CPU_FTR_L3CR)) {
|
|
unsigned long val = simple_strtoul(str, NULL, 0);
|
|
printk(KERN_INFO "l3cr set to %lx\n", val);
|
|
_set_L3CR(val); /* and enable it */
|
|
}
|
|
return 1;
|
|
}
|
|
__setup("l3cr=", ppc_setup_l3cr);
|
|
|
|
/*
 * Generic arch initcall: clear the boot progress indicator and give
 * the platform a chance to run its own late init hook.
 */
static int __init ppc_init(void)
{
	/* clear the progress line */
	if (ppc_md.progress)
		ppc_md.progress(" ", 0xffff);

	/* call platform init */
	if (ppc_md.init)
		ppc_md.init();

	return 0;
}
arch_initcall(ppc_init);
|
|
|
|
/*
 * Allocate one THREAD_SIZE-aligned stack from memblock.  Boot cannot
 * proceed without these stacks, so a failed allocation panics rather
 * than returning NULL; the caller never needs to check.
 */
static void *__init alloc_stack(void)
{
	void *stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);

	if (!stack)
		panic("cannot allocate %d bytes for stack at %pS\n",
		      THREAD_SIZE, (void *)_RET_IP_);

	return stack;
}
|
|
|
|
/*
 * Allocate the per-CPU soft and hard IRQ stacks.  Interrupt stacks
 * must be in lowmem; on ppc32 memblock is limited to lowmem by
 * default, so plain alloc_stack() gives us that for free.
 */
void __init irqstack_early_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		softirq_ctx[cpu] = alloc_stack();
		hardirq_ctx[cpu] = alloc_stack();
	}
}
|
|
|
|
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/*
 * Allocate the per-CPU exception-level stacks (critical, and on BookE
 * also debug and machine-check), indexed by *hardware* CPU number.
 * Like the IRQ stacks these must be in lowmem; on ppc32 memblock is
 * limited to lowmem (MEMBLOCK_REAL_LIMIT), so that holds for free.
 */
void __init exc_lvl_early_init(void)
{
	unsigned int cpu, hw_cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		hw_cpu = get_hard_smp_processor_id(cpu);
#else
		hw_cpu = 0;
#endif

		critirq_ctx[hw_cpu] = alloc_stack();
#ifdef CONFIG_BOOKE
		dbgirq_ctx[hw_cpu] = alloc_stack();
		mcheckirq_ctx[hw_cpu] = alloc_stack();
#endif
	}
}
#endif
|
|
|
|
/*
 * Install the low-power idle routine appropriate for this CPU family,
 * if the CPU advertises doze or nap capability.  Which section is
 * compiled in depends on the kernel configuration.
 */
void __init setup_power_save(void)
{
#ifdef CONFIG_PPC_BOOK3S_32
	/* Classic 6xx (Book3S 32-bit) parts. */
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = ppc6xx_idle;
#endif

#ifdef CONFIG_E500
	/* Freescale e500 family. */
	if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
	    cpu_has_feature(CPU_FTR_CAN_NAP))
		ppc_md.power_save = e500_idle;
#endif
}
|
|
|
|
__init void initialize_cache_info(void)
|
|
{
|
|
/*
|
|
* Set cache line size based on type of cpu as a default.
|
|
* Systems with OF can look in the properties on the cpu node(s)
|
|
* for a possibly more accurate value.
|
|
*/
|
|
dcache_bsize = cur_cpu_spec->dcache_bsize;
|
|
icache_bsize = cur_cpu_spec->icache_bsize;
|
|
ucache_bsize = 0;
|
|
if (cpu_has_feature(CPU_FTR_UNIFIED_ID_CACHE))
|
|
ucache_bsize = icache_bsize = dcache_bsize;
|
|
}
|