Merge branches 'fixes' and 'misc'; commit 'kuser^{/add CPU_THUMB_CAPABLE to indicate}' into for-linus
This commit is contained in:
commit
17a870bea3
@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
|
||||
kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
|
||||
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
|
||||
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
|
||||
80211.xml sh.xml regulator.xml w1.xml \
|
||||
sh.xml regulator.xml w1.xml \
|
||||
writing_musb_glue_layer.xml iio.xml
|
||||
|
||||
ifeq ($(DOCBOOKS),)
|
||||
|
@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
|
||||
#else
|
||||
const u16 *a = (const u16 *)addr1;
|
||||
const u16 *b = (const u16 *)addr2;
|
||||
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
|
||||
return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
|
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 10
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc1
|
||||
EXTRAVERSION = -rc2
|
||||
NAME = Roaring Lionus
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -2,6 +2,7 @@ config ARM
|
||||
bool
|
||||
default y
|
||||
select ARCH_CLOCKSOURCE_DATA
|
||||
select ARCH_HAS_DEBUG_VIRTUAL
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_ELF_RANDOMIZE
|
||||
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
|
||||
|
@ -34,8 +34,7 @@ config PROCESSOR_ID
|
||||
used instead of the auto-probing which utilizes the register.
|
||||
|
||||
config REMAP_VECTORS_TO_RAM
|
||||
bool 'Install vectors to the beginning of RAM' if DRAM_BASE
|
||||
depends on DRAM_BASE
|
||||
bool 'Install vectors to the beginning of RAM'
|
||||
help
|
||||
The kernel needs to change the hardware exception vectors.
|
||||
In nommu mode, the hardware exception vectors are normally
|
||||
|
@ -32,6 +32,7 @@ extern void error(char *);
|
||||
|
||||
/* Not needed, but used in some headers pulled in by decompressors */
|
||||
extern char * strstr(const char * s1, const char *s2);
|
||||
extern size_t strlen(const char *s);
|
||||
|
||||
#ifdef CONFIG_KERNEL_GZIP
|
||||
#include "../../../../lib/decompress_inflate.c"
|
||||
|
@ -144,7 +144,7 @@ extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
|
||||
|
||||
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
|
||||
{
|
||||
unsigned long val = ptr ? virt_to_phys(ptr) : 0;
|
||||
unsigned long val = ptr ? __pa_symbol(ptr) : 0;
|
||||
mcpm_entry_vectors[cluster][cpu] = val;
|
||||
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
|
||||
}
|
||||
@ -299,8 +299,8 @@ void mcpm_cpu_power_down(void)
|
||||
* the kernel as if the power_up method just had deasserted reset
|
||||
* on the CPU.
|
||||
*/
|
||||
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
|
||||
phys_reset(virt_to_phys(mcpm_entry_point));
|
||||
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
|
||||
phys_reset(__pa_symbol(mcpm_entry_point));
|
||||
|
||||
/* should never get here */
|
||||
BUG();
|
||||
@ -388,8 +388,8 @@ static int __init nocache_trampoline(unsigned long _arg)
|
||||
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
|
||||
__mcpm_cpu_down(cpu, cluster);
|
||||
|
||||
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
|
||||
phys_reset(virt_to_phys(mcpm_entry_point));
|
||||
phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
|
||||
phys_reset(__pa_symbol(mcpm_entry_point));
|
||||
BUG();
|
||||
}
|
||||
|
||||
@ -449,7 +449,7 @@ int __init mcpm_sync_init(
|
||||
sync_cache_w(&mcpm_sync);
|
||||
|
||||
if (power_up_setup) {
|
||||
mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
|
||||
mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
|
||||
sync_cache_w(&mcpm_power_up_setup_phys);
|
||||
}
|
||||
|
||||
|
@ -16,7 +16,7 @@
|
||||
#ifndef __CACHE_UNIPHIER_H
|
||||
#define __CACHE_UNIPHIER_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
|
||||
#ifdef CONFIG_CACHE_UNIPHIER
|
||||
int uniphier_cache_init(void);
|
||||
|
@ -83,8 +83,15 @@
|
||||
#define IOREMAP_MAX_ORDER 24
|
||||
#endif
|
||||
|
||||
#define VECTORS_BASE UL(0xffff0000)
|
||||
|
||||
#else /* CONFIG_MMU */
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
extern unsigned long vectors_base;
|
||||
#define VECTORS_BASE vectors_base
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The limitation of user task size can grow up to the end of free ram region.
|
||||
* It is difficult to define and perhaps will never meet the original meaning
|
||||
@ -111,6 +118,13 @@
|
||||
|
||||
#endif /* !CONFIG_MMU */
|
||||
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
#define KERNEL_START _sdata
|
||||
#else
|
||||
#define KERNEL_START _stext
|
||||
#endif
|
||||
#define KERNEL_END _end
|
||||
|
||||
/*
|
||||
* We fix the TCM memories max 32 KiB ITCM resp DTCM at these
|
||||
* locations
|
||||
@ -206,7 +220,7 @@ extern const void *__pv_table_begin, *__pv_table_end;
|
||||
: "r" (x), "I" (__PV_BITS_31_24) \
|
||||
: "cc")
|
||||
|
||||
static inline phys_addr_t __virt_to_phys(unsigned long x)
|
||||
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
|
||||
{
|
||||
phys_addr_t t;
|
||||
|
||||
@ -238,7 +252,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
#define PHYS_OFFSET PLAT_PHYS_OFFSET
|
||||
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
|
||||
|
||||
static inline phys_addr_t __virt_to_phys(unsigned long x)
|
||||
static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
|
||||
{
|
||||
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
|
||||
}
|
||||
@ -254,6 +268,16 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
|
||||
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
|
||||
PHYS_PFN_OFFSET)
|
||||
|
||||
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
|
||||
|
||||
#ifdef CONFIG_DEBUG_VIRTUAL
|
||||
extern phys_addr_t __virt_to_phys(unsigned long x);
|
||||
extern phys_addr_t __phys_addr_symbol(unsigned long x);
|
||||
#else
|
||||
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
|
||||
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* These are *only* valid on the kernel direct mapped RAM memory.
|
||||
* Note: Drivers should NOT use these. They are the wrong
|
||||
@ -276,6 +300,7 @@ static inline void *phys_to_virt(phys_addr_t x)
|
||||
* Drivers should NOT use these either.
|
||||
*/
|
||||
#define __pa(x) __virt_to_phys((unsigned long)(x))
|
||||
#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
|
||||
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
|
||||
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
|
||||
|
||||
|
@ -63,9 +63,9 @@ typedef pte_t *pte_addr_t;
|
||||
/*
|
||||
* Mark the prot value as uncacheable and unbufferable.
|
||||
*/
|
||||
#define pgprot_noncached(prot) __pgprot(0)
|
||||
#define pgprot_writecombine(prot) __pgprot(0)
|
||||
#define pgprot_dmacoherent(prot) __pgprot(0)
|
||||
#define pgprot_noncached(prot) (prot)
|
||||
#define pgprot_writecombine(prot) (prot)
|
||||
#define pgprot_dmacoherent(prot) (prot)
|
||||
|
||||
|
||||
/*
|
||||
|
@ -151,11 +151,6 @@ __after_proc_init:
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_ICACHE_DISABLE
|
||||
bic r0, r0, #CR_I
|
||||
#endif
|
||||
#ifdef CONFIG_CPU_HIGH_VECTOR
|
||||
orr r0, r0, #CR_V
|
||||
#else
|
||||
bic r0, r0, #CR_V
|
||||
#endif
|
||||
mcr p15, 0, r0, c1, c0, 0 @ write control reg
|
||||
#elif defined (CONFIG_CPU_V7M)
|
||||
|
@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
|
||||
break;
|
||||
|
||||
case R_ARM_PREL31:
|
||||
offset = *(u32 *)loc + sym->st_value - loc;
|
||||
*(u32 *)loc = offset & 0x7fffffff;
|
||||
offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
|
||||
offset += sym->st_value - loc;
|
||||
if (offset >= 0x40000000 || offset < -0x40000000) {
|
||||
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
|
||||
module->name, relindex, i, symname,
|
||||
ELF32_R_TYPE(rel->r_info), loc,
|
||||
sym->st_value);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
*(u32 *)loc &= 0x80000000;
|
||||
*(u32 *)loc |= offset & 0x7fffffff;
|
||||
break;
|
||||
|
||||
case R_ARM_MOVW_ABS_NC:
|
||||
|
@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
|
||||
extern void init_default_cache_policy(unsigned long);
|
||||
extern void paging_init(const struct machine_desc *desc);
|
||||
extern void early_paging_init(const struct machine_desc *);
|
||||
extern void sanity_check_meminfo(void);
|
||||
extern void adjust_lowmem_bounds(void);
|
||||
extern enum reboot_mode reboot_mode;
|
||||
extern void setup_dma_zone(const struct machine_desc *desc);
|
||||
|
||||
@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
|
||||
setup_dma_zone(mdesc);
|
||||
xen_early_init();
|
||||
efi_init();
|
||||
sanity_check_meminfo();
|
||||
/*
|
||||
* Make sure the calculation for lowmem/highmem is set appropriately
|
||||
* before reserving/allocating any mmeory
|
||||
*/
|
||||
adjust_lowmem_bounds();
|
||||
arm_memblock_init(mdesc);
|
||||
/* Memory may have been removed so recalculate the bounds. */
|
||||
adjust_lowmem_bounds();
|
||||
|
||||
early_ioremap_reset();
|
||||
|
||||
|
@ -251,7 +251,7 @@ void __cpu_die(unsigned int cpu)
|
||||
pr_err("CPU%u: cpu didn't die\n", cpu);
|
||||
return;
|
||||
}
|
||||
pr_notice("CPU%u: shutdown\n", cpu);
|
||||
pr_debug("CPU%u: shutdown\n", cpu);
|
||||
|
||||
/*
|
||||
* platform_cpu_kill() is generally expected to do the powering off
|
||||
|
@ -27,7 +27,7 @@ static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
phys_addr_t addr;
|
||||
|
||||
addr = virt_to_phys(secondary_startup);
|
||||
addr = __pa_symbol(secondary_startup);
|
||||
|
||||
if (addr > (phys_addr_t)(uint32_t)(-1)) {
|
||||
pr_err("FAIL: resume address over 32bit (%pa)", &addr);
|
||||
|
@ -25,7 +25,7 @@
|
||||
static void write_release_addr(u32 release_phys)
|
||||
{
|
||||
u32 *virt = (u32 *) phys_to_virt(release_phys);
|
||||
writel_relaxed(virt_to_phys(secondary_startup), virt);
|
||||
writel_relaxed(__pa_symbol(secondary_startup), virt);
|
||||
/* Make sure this store is visible to other CPUs */
|
||||
smp_wmb();
|
||||
__cpuc_flush_dcache_area(virt, sizeof(u32));
|
||||
|
@ -135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu,
|
||||
}
|
||||
|
||||
/* Write the secondary init routine to the BootLUT reset vector */
|
||||
val = virt_to_phys(secondary_startup);
|
||||
val = __pa_symbol(secondary_startup);
|
||||
writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);
|
||||
|
||||
/* Power up the core, will jump straight to its reset vector when we
|
||||
|
@ -151,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
|
||||
* Set the reset vector to point to the secondary_startup
|
||||
* routine
|
||||
*/
|
||||
cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));
|
||||
cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));
|
||||
|
||||
/* Unhalt the cpu */
|
||||
cpu_rst_cfg_set(cpu, 0);
|
||||
|
@ -116,7 +116,7 @@ static int nsp_write_lut(unsigned int cpu)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
secondary_startup_phy = virt_to_phys(secondary_startup);
|
||||
secondary_startup_phy = __pa_symbol(secondary_startup);
|
||||
BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX);
|
||||
|
||||
writel_relaxed(secondary_startup_phy, sku_rom_lut);
|
||||
@ -189,7 +189,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
* Secondary cores will start in secondary_startup(),
|
||||
* defined in "arch/arm/kernel/head.S"
|
||||
*/
|
||||
boot_func = virt_to_phys(secondary_startup);
|
||||
boot_func = __pa_symbol(secondary_startup);
|
||||
BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK);
|
||||
BUG_ON(boot_func > (phys_addr_t)U32_MAX);
|
||||
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cp15.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/smp_plat.h>
|
||||
#include <asm/smp_scu.h>
|
||||
|
||||
@ -75,7 +76,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
|
||||
if (!cpu_ctrl)
|
||||
goto unmap_scu;
|
||||
|
||||
vectors_base = ioremap(CONFIG_VECTORS_BASE, SZ_32K);
|
||||
vectors_base = ioremap(VECTORS_BASE, SZ_32K);
|
||||
if (!vectors_base)
|
||||
goto unmap_scu;
|
||||
|
||||
@ -92,7 +93,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
|
||||
* Write the secondary startup address into the SW reset address
|
||||
* vector. This is used by boot_inst.
|
||||
*/
|
||||
writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR);
|
||||
writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR);
|
||||
|
||||
iounmap(vectors_base);
|
||||
unmap_scu:
|
||||
|
@ -41,7 +41,7 @@ static int exynos_do_idle(unsigned long mode)
|
||||
case FW_DO_IDLE_AFTR:
|
||||
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
|
||||
exynos_save_cp15();
|
||||
writel_relaxed(virt_to_phys(exynos_cpu_resume_ns),
|
||||
writel_relaxed(__pa_symbol(exynos_cpu_resume_ns),
|
||||
sysram_ns_base_addr + 0x24);
|
||||
writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
|
||||
if (soc_is_exynos3250()) {
|
||||
@ -135,7 +135,7 @@ static int exynos_suspend(void)
|
||||
exynos_save_cp15();
|
||||
|
||||
writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG);
|
||||
writel(virt_to_phys(exynos_cpu_resume_ns),
|
||||
writel(__pa_symbol(exynos_cpu_resume_ns),
|
||||
sysram_ns_base_addr + EXYNOS_BOOT_ADDR);
|
||||
|
||||
return cpu_suspend(0, exynos_cpu_suspend);
|
||||
|
@ -221,7 +221,7 @@ static void exynos_mcpm_setup_entry_point(void)
|
||||
*/
|
||||
__raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */
|
||||
__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
|
||||
__raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
|
||||
__raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8);
|
||||
}
|
||||
|
||||
static struct syscore_ops exynos_mcpm_syscore_ops = {
|
||||
|
@ -353,7 +353,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
smp_rmb();
|
||||
|
||||
boot_addr = virt_to_phys(exynos4_secondary_startup);
|
||||
boot_addr = __pa_symbol(exynos4_secondary_startup);
|
||||
|
||||
ret = exynos_set_boot_addr(core_id, boot_addr);
|
||||
if (ret)
|
||||
@ -443,7 +443,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
|
||||
|
||||
mpidr = cpu_logical_map(i);
|
||||
core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
|
||||
boot_addr = virt_to_phys(exynos4_secondary_startup);
|
||||
boot_addr = __pa_symbol(exynos4_secondary_startup);
|
||||
|
||||
ret = exynos_set_boot_addr(core_id, boot_addr);
|
||||
if (ret)
|
||||
|
@ -132,7 +132,7 @@ static void exynos_set_wakeupmask(long mask)
|
||||
|
||||
static void exynos_cpu_set_boot_vector(long flags)
|
||||
{
|
||||
writel_relaxed(virt_to_phys(exynos_cpu_resume),
|
||||
writel_relaxed(__pa_symbol(exynos_cpu_resume),
|
||||
exynos_boot_vector_addr());
|
||||
writel_relaxed(flags, exynos_boot_vector_flag());
|
||||
}
|
||||
@ -238,7 +238,7 @@ static int exynos_cpu0_enter_aftr(void)
|
||||
|
||||
abort:
|
||||
if (cpu_online(1)) {
|
||||
unsigned long boot_addr = virt_to_phys(exynos_cpu_resume);
|
||||
unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);
|
||||
|
||||
/*
|
||||
* Set the boot vector to something non-zero
|
||||
@ -330,7 +330,7 @@ cpu1_aborted:
|
||||
|
||||
static void exynos_pre_enter_aftr(void)
|
||||
{
|
||||
unsigned long boot_addr = virt_to_phys(exynos_cpu_resume);
|
||||
unsigned long boot_addr = __pa_symbol(exynos_cpu_resume);
|
||||
|
||||
(void)exynos_set_boot_addr(1, boot_addr);
|
||||
}
|
||||
|
@ -344,7 +344,7 @@ static void exynos_pm_prepare(void)
|
||||
exynos_pm_enter_sleep_mode();
|
||||
|
||||
/* ensure at least INFORM0 has the resume address */
|
||||
pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0);
|
||||
pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
|
||||
}
|
||||
|
||||
static void exynos3250_pm_prepare(void)
|
||||
@ -361,7 +361,7 @@ static void exynos3250_pm_prepare(void)
|
||||
exynos_pm_enter_sleep_mode();
|
||||
|
||||
/* ensure at least INFORM0 has the resume address */
|
||||
pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0);
|
||||
pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
|
||||
}
|
||||
|
||||
static void exynos5420_pm_prepare(void)
|
||||
@ -386,7 +386,7 @@ static void exynos5420_pm_prepare(void)
|
||||
|
||||
/* ensure at least INFORM0 has the resume address */
|
||||
if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM))
|
||||
pmu_raw_writel(virt_to_phys(mcpm_entry_point), S5P_INFORM0);
|
||||
pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0);
|
||||
|
||||
tmp = pmu_raw_readl(EXYNOS5_ARM_L2_OPTION);
|
||||
tmp &= ~EXYNOS5_USE_RETENTION;
|
||||
|
@ -327,7 +327,7 @@ static int __init hip04_smp_init(void)
|
||||
*/
|
||||
writel_relaxed(hip04_boot_method[0], relocation);
|
||||
writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */
|
||||
writel_relaxed(virt_to_phys(secondary_startup), relocation + 8);
|
||||
writel_relaxed(__pa_symbol(secondary_startup), relocation + 8);
|
||||
writel_relaxed(0, relocation + 12);
|
||||
iounmap(relocation);
|
||||
|
||||
|
@ -28,7 +28,7 @@ void hi3xxx_set_cpu_jump(int cpu, void *jump_addr)
|
||||
cpu = cpu_logical_map(cpu);
|
||||
if (!cpu || !ctrl_base)
|
||||
return;
|
||||
writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2));
|
||||
writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2));
|
||||
}
|
||||
|
||||
int hi3xxx_get_cpu_jump(int cpu)
|
||||
@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
phys_addr_t jumpaddr;
|
||||
|
||||
jumpaddr = virt_to_phys(secondary_startup);
|
||||
jumpaddr = __pa_symbol(secondary_startup);
|
||||
hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
|
||||
hix5hd2_set_cpu(cpu, true);
|
||||
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
|
||||
@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
struct device_node *node;
|
||||
|
||||
|
||||
jumpaddr = virt_to_phys(secondary_startup);
|
||||
jumpaddr = __pa_symbol(secondary_startup);
|
||||
hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);
|
||||
|
||||
node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
|
||||
|
@ -117,7 +117,7 @@ static void __init ls1021a_smp_prepare_cpus(unsigned int max_cpus)
|
||||
dcfg_base = of_iomap(np, 0);
|
||||
BUG_ON(!dcfg_base);
|
||||
|
||||
paddr = virt_to_phys(secondary_startup);
|
||||
paddr = __pa_symbol(secondary_startup);
|
||||
writel_relaxed(cpu_to_be32(paddr), dcfg_base + DCFG_CCSR_SCRATCHRW1);
|
||||
|
||||
iounmap(dcfg_base);
|
||||
|
@ -499,7 +499,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
|
||||
memset(suspend_ocram_base, 0, sizeof(*pm_info));
|
||||
pm_info = suspend_ocram_base;
|
||||
pm_info->pbase = ocram_pbase;
|
||||
pm_info->resume_addr = virt_to_phys(v7_cpu_resume);
|
||||
pm_info->resume_addr = __pa_symbol(v7_cpu_resume);
|
||||
pm_info->pm_info_size = sizeof(*pm_info);
|
||||
|
||||
/*
|
||||
|
@ -99,7 +99,7 @@ void imx_enable_cpu(int cpu, bool enable)
|
||||
void imx_set_cpu_jump(int cpu, void *jump_addr)
|
||||
{
|
||||
cpu = cpu_logical_map(cpu);
|
||||
writel_relaxed(virt_to_phys(jump_addr),
|
||||
writel_relaxed(__pa_symbol(jump_addr),
|
||||
src_base + SRC_GPR1 + cpu * 8);
|
||||
}
|
||||
|
||||
|
@ -122,7 +122,7 @@ static void __init __mtk_smp_prepare_cpus(unsigned int max_cpus, int trustzone)
|
||||
* write the address of slave startup address into the system-wide
|
||||
* jump register
|
||||
*/
|
||||
writel_relaxed(virt_to_phys(secondary_startup_arm),
|
||||
writel_relaxed(__pa_symbol(secondary_startup_arm),
|
||||
mtk_smp_base + mtk_smp_info->jump_reg);
|
||||
}
|
||||
|
||||
|
@ -110,7 +110,7 @@ static void mvebu_pm_store_armadaxp_bootinfo(u32 *store_addr)
|
||||
{
|
||||
phys_addr_t resume_pc;
|
||||
|
||||
resume_pc = virt_to_phys(armada_370_xp_cpu_resume);
|
||||
resume_pc = __pa_symbol(armada_370_xp_cpu_resume);
|
||||
|
||||
/*
|
||||
* The bootloader expects the first two words to be a magic
|
||||
|
@ -112,7 +112,7 @@ static const struct of_device_id of_pmsu_table[] = {
|
||||
|
||||
void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
|
||||
{
|
||||
writel(virt_to_phys(boot_addr), pmsu_mp_base +
|
||||
writel(__pa_symbol(boot_addr), pmsu_mp_base +
|
||||
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
|
||||
}
|
||||
|
||||
|
@ -153,7 +153,7 @@ void mvebu_system_controller_set_cpu_boot_addr(void *boot_addr)
|
||||
if (of_machine_is_compatible("marvell,armada375"))
|
||||
mvebu_armada375_smp_wa_init();
|
||||
|
||||
writel(virt_to_phys(boot_addr), system_controller_base +
|
||||
writel(__pa_symbol(boot_addr), system_controller_base +
|
||||
mvebu_sc->resume_boot_addr);
|
||||
}
|
||||
#endif
|
||||
|
@ -315,15 +315,15 @@ void omap3_save_scratchpad_contents(void)
|
||||
scratchpad_contents.boot_config_ptr = 0x0;
|
||||
if (cpu_is_omap3630())
|
||||
scratchpad_contents.public_restore_ptr =
|
||||
virt_to_phys(omap3_restore_3630);
|
||||
__pa_symbol(omap3_restore_3630);
|
||||
else if (omap_rev() != OMAP3430_REV_ES3_0 &&
|
||||
omap_rev() != OMAP3430_REV_ES3_1 &&
|
||||
omap_rev() != OMAP3430_REV_ES3_1_2)
|
||||
scratchpad_contents.public_restore_ptr =
|
||||
virt_to_phys(omap3_restore);
|
||||
__pa_symbol(omap3_restore);
|
||||
else
|
||||
scratchpad_contents.public_restore_ptr =
|
||||
virt_to_phys(omap3_restore_es3);
|
||||
__pa_symbol(omap3_restore_es3);
|
||||
|
||||
if (omap_type() == OMAP2_DEVICE_TYPE_GP)
|
||||
scratchpad_contents.secure_ram_restore_ptr = 0x0;
|
||||
@ -395,7 +395,7 @@ void omap3_save_scratchpad_contents(void)
|
||||
sdrc_block_contents.flags = 0x0;
|
||||
sdrc_block_contents.block_size = 0x0;
|
||||
|
||||
arm_context_addr = virt_to_phys(omap3_arm_context);
|
||||
arm_context_addr = __pa_symbol(omap3_arm_context);
|
||||
|
||||
/* Copy all the contents to the scratchpad location */
|
||||
scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
|
||||
|
@ -273,7 +273,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
|
||||
cpu_clear_prev_logic_pwrst(cpu);
|
||||
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
|
||||
pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
|
||||
set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume));
|
||||
set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
|
||||
omap_pm_ops.scu_prepare(cpu, power_state);
|
||||
l2x0_pwrst_prepare(cpu, save_state);
|
||||
|
||||
@ -325,7 +325,7 @@ int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
|
||||
|
||||
pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
|
||||
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
|
||||
set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart));
|
||||
set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
|
||||
omap_pm_ops.scu_prepare(cpu, power_state);
|
||||
|
||||
/*
|
||||
@ -467,13 +467,13 @@ void __init omap4_mpuss_early_init(void)
|
||||
sar_base = omap4_get_sar_ram_base();
|
||||
|
||||
if (cpu_is_omap443x())
|
||||
startup_pa = virt_to_phys(omap4_secondary_startup);
|
||||
startup_pa = __pa_symbol(omap4_secondary_startup);
|
||||
else if (cpu_is_omap446x())
|
||||
startup_pa = virt_to_phys(omap4460_secondary_startup);
|
||||
startup_pa = __pa_symbol(omap4460_secondary_startup);
|
||||
else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
|
||||
startup_pa = virt_to_phys(omap5_secondary_hyp_startup);
|
||||
startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
|
||||
else
|
||||
startup_pa = virt_to_phys(omap5_secondary_startup);
|
||||
startup_pa = __pa_symbol(omap5_secondary_startup);
|
||||
|
||||
if (cpu_is_omap44xx())
|
||||
writel_relaxed(startup_pa, sar_base +
|
||||
|
@ -316,9 +316,9 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
|
||||
* A barrier is added to ensure that write buffer is drained
|
||||
*/
|
||||
if (omap_secure_apis_support())
|
||||
omap_auxcoreboot_addr(virt_to_phys(cfg.startup_addr));
|
||||
omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
|
||||
else
|
||||
writel_relaxed(virt_to_phys(cfg.startup_addr),
|
||||
writel_relaxed(__pa_symbol(cfg.startup_addr),
|
||||
base + OMAP_AUX_CORE_BOOT_1);
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
* waiting for. This would wake up the secondary core from WFE
|
||||
*/
|
||||
#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
|
||||
__raw_writel(virt_to_phys(sirfsoc_secondary_startup),
|
||||
__raw_writel(__pa_symbol(sirfsoc_secondary_startup),
|
||||
clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);
|
||||
|
||||
#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
|
||||
|
@ -54,7 +54,7 @@ static void sirfsoc_set_sleep_mode(u32 mode)
|
||||
|
||||
static int sirfsoc_pre_suspend_power_off(void)
|
||||
{
|
||||
u32 wakeup_entry = virt_to_phys(cpu_resume);
|
||||
u32 wakeup_entry = __pa_symbol(cpu_resume);
|
||||
|
||||
sirfsoc_rtc_iobrg_writel(wakeup_entry, sirfsoc_pwrc_base +
|
||||
SIRFSOC_PWRC_SCRATCH_PAD1);
|
||||
|
@ -249,7 +249,7 @@ static int palmz72_pm_suspend(void)
|
||||
store_ptr = *PALMZ72_SAVE_DWORD;
|
||||
|
||||
/* Setting PSPR to a proper value */
|
||||
PSPR = virt_to_phys(&palmz72_resume_info);
|
||||
PSPR = __pa_symbol(&palmz72_resume_info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
|
||||
static int pxa25x_cpu_pm_prepare(void)
|
||||
{
|
||||
/* set resume return address */
|
||||
PSPR = virt_to_phys(cpu_resume);
|
||||
PSPR = __pa_symbol(cpu_resume);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -168,7 +168,7 @@ static int pxa27x_cpu_pm_valid(suspend_state_t state)
|
||||
static int pxa27x_cpu_pm_prepare(void)
|
||||
{
|
||||
/* set resume return address */
|
||||
PSPR = virt_to_phys(cpu_resume);
|
||||
PSPR = __pa_symbol(cpu_resume);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -123,7 +123,7 @@ static void pxa3xx_cpu_pm_suspend(void)
|
||||
PSPR = 0x5c014000;
|
||||
|
||||
/* overwrite with the resume address */
|
||||
*p = virt_to_phys(cpu_resume);
|
||||
*p = __pa_symbol(cpu_resume);
|
||||
|
||||
cpu_suspend(0, pxa3xx_finish_suspend);
|
||||
|
||||
|
@ -76,7 +76,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
|
||||
}
|
||||
/* Put the boot address in this magic register */
|
||||
regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET,
|
||||
virt_to_phys(versatile_secondary_startup));
|
||||
__pa_symbol(versatile_secondary_startup));
|
||||
}
|
||||
|
||||
static const struct smp_operations realview_dt_smp_ops __initconst = {
|
||||
|
@ -156,7 +156,7 @@ static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
*/
|
||||
mdelay(1); /* ensure the cpus other than cpu0 to startup */
|
||||
|
||||
writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
|
||||
writel(__pa_symbol(secondary_startup), sram_base_addr + 8);
|
||||
writel(0xDEADBEAF, sram_base_addr + 4);
|
||||
dsb_sev();
|
||||
}
|
||||
@ -195,7 +195,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
|
||||
}
|
||||
|
||||
/* set the boot function for the sram code */
|
||||
rockchip_boot_fn = virt_to_phys(secondary_startup);
|
||||
rockchip_boot_fn = __pa_symbol(secondary_startup);
|
||||
|
||||
/* copy the trampoline to sram, that runs during startup of the core */
|
||||
memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
|
||||
|
@ -62,7 +62,7 @@ static inline u32 rk3288_l2_config(void)
|
||||
static void rk3288_config_bootdata(void)
|
||||
{
|
||||
rkpm_bootdata_cpusp = rk3288_bootram_phy + (SZ_4K - 8);
|
||||
rkpm_bootdata_cpu_code = virt_to_phys(cpu_resume);
|
||||
rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume);
|
||||
|
||||
rkpm_bootdata_l2ctlr_f = 1;
|
||||
rkpm_bootdata_l2ctlr = rk3288_l2_config();
|
||||
|
@ -484,7 +484,7 @@ static int jive_pm_suspend(void)
|
||||
* correct address to resume from. */
|
||||
|
||||
__raw_writel(0x2BED, S3C2412_INFORM0);
|
||||
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
|
||||
__raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ static void s3c2410_pm_prepare(void)
|
||||
{
|
||||
/* ensure at least GSTATUS3 has the resume address */
|
||||
|
||||
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C2410_GSTATUS3);
|
||||
__raw_writel(__pa_symbol(s3c_cpu_resume), S3C2410_GSTATUS3);
|
||||
|
||||
S3C_PMDBG("GSTATUS3 0x%08x\n", __raw_readl(S3C2410_GSTATUS3));
|
||||
S3C_PMDBG("GSTATUS4 0x%08x\n", __raw_readl(S3C2410_GSTATUS4));
|
||||
|
@ -48,7 +48,7 @@ static void s3c2416_pm_prepare(void)
|
||||
* correct address to resume from.
|
||||
*/
|
||||
__raw_writel(0x2BED, S3C2412_INFORM0);
|
||||
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1);
|
||||
__raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1);
|
||||
}
|
||||
|
||||
static int s3c2416_pm_add(struct device *dev, struct subsys_interface *sif)
|
||||
|
@ -304,7 +304,7 @@ static void s3c64xx_pm_prepare(void)
|
||||
wake_irqs, ARRAY_SIZE(wake_irqs));
|
||||
|
||||
/* store address of resume. */
|
||||
__raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0);
|
||||
__raw_writel(__pa_symbol(s3c_cpu_resume), S3C64XX_INFORM0);
|
||||
|
||||
/* ensure previous wakeup state is cleared before sleeping */
|
||||
__raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
|
||||
|
@ -69,7 +69,7 @@ static void s5pv210_pm_prepare(void)
|
||||
__raw_writel(s5pv210_irqwake_intmask, S5P_WAKEUP_MASK);
|
||||
|
||||
/* ensure at least INFORM0 has the resume address */
|
||||
__raw_writel(virt_to_phys(s5pv210_cpu_resume), S5P_INFORM0);
|
||||
__raw_writel(__pa_symbol(s5pv210_cpu_resume), S5P_INFORM0);
|
||||
|
||||
tmp = __raw_readl(S5P_SLEEP_CFG);
|
||||
tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
|
||||
|
@ -73,7 +73,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
|
||||
RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR;
|
||||
|
||||
/* set resume return address */
|
||||
PSPR = virt_to_phys(cpu_resume);
|
||||
PSPR = __pa_symbol(cpu_resume);
|
||||
|
||||
/* go zzz */
|
||||
cpu_suspend(0, sa1100_finish_suspend);
|
||||
|
@ -171,7 +171,7 @@ static void apmu_parse_dt(void (*fn)(struct resource *res, int cpu, int bit))
|
||||
static void __init shmobile_smp_apmu_setup_boot(void)
|
||||
{
|
||||
/* install boot code shared by all CPUs */
|
||||
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
|
||||
shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
|
||||
}
|
||||
|
||||
void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
|
||||
@ -185,7 +185,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
|
||||
int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
/* For this particular CPU register boot vector */
|
||||
shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0);
|
||||
shmobile_smp_hook(cpu, __pa_symbol(secondary_startup), 0);
|
||||
|
||||
return apmu_wrap(cpu, apmu_power_on);
|
||||
}
|
||||
@ -301,7 +301,7 @@ int shmobile_smp_apmu_cpu_kill(unsigned int cpu)
|
||||
#if defined(CONFIG_SUSPEND)
|
||||
static int shmobile_smp_apmu_do_suspend(unsigned long cpu)
|
||||
{
|
||||
shmobile_smp_hook(cpu, virt_to_phys(cpu_resume), 0);
|
||||
shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0);
|
||||
shmobile_smp_apmu_cpu_shutdown(cpu);
|
||||
cpu_do_idle(); /* WFI selects Core Standby */
|
||||
return 1;
|
||||
|
@ -24,7 +24,7 @@ static void __iomem *shmobile_scu_base;
|
||||
static int shmobile_scu_cpu_prepare(unsigned int cpu)
|
||||
{
|
||||
/* For this particular CPU register SCU SMP boot vector */
|
||||
shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
|
||||
shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu),
|
||||
shmobile_scu_base_phys);
|
||||
return 0;
|
||||
}
|
||||
@ -33,7 +33,7 @@ void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
|
||||
unsigned int max_cpus)
|
||||
{
|
||||
/* install boot code shared by all CPUs */
|
||||
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
|
||||
shmobile_boot_fn = __pa_symbol(shmobile_smp_boot);
|
||||
|
||||
/* enable SCU and cache coherency on booting CPU */
|
||||
shmobile_scu_base_phys = scu_base_phys;
|
||||
|
@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
|
||||
|
||||
flush_cache_all();
|
||||
@ -63,7 +63,7 @@ static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle
|
||||
SOCFPGA_A10_RSTMGR_MODMPURST);
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
|
||||
|
||||
flush_cache_all();
|
||||
|
@ -117,7 +117,7 @@ static void __init spear13xx_smp_prepare_cpus(unsigned int max_cpus)
|
||||
* (presently it is in SRAM). The BootMonitor waits until it receives a
|
||||
* soft interrupt, and then the secondary CPU branches to this address.
|
||||
*/
|
||||
__raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
|
||||
__raw_writel(__pa_symbol(spear13xx_secondary_startup), SYS_LOCATION);
|
||||
}
|
||||
|
||||
const struct smp_operations spear13xx_smp_ops __initconst = {
|
||||
|
@ -103,7 +103,7 @@ static void __init sti_smp_prepare_cpus(unsigned int max_cpus)
|
||||
u32 __iomem *cpu_strt_ptr;
|
||||
u32 release_phys;
|
||||
int cpu;
|
||||
unsigned long entry_pa = virt_to_phys(sti_secondary_startup);
|
||||
unsigned long entry_pa = __pa_symbol(sti_secondary_startup);
|
||||
|
||||
np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
|
||||
|
||||
|
@ -80,7 +80,7 @@ static int sun6i_smp_boot_secondary(unsigned int cpu,
|
||||
spin_lock(&cpu_lock);
|
||||
|
||||
/* Set CPU boot address */
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
cpucfg_membase + CPUCFG_PRIVATE0_REG);
|
||||
|
||||
/* Assert the CPU core in reset */
|
||||
@ -162,7 +162,7 @@ static int sun8i_smp_boot_secondary(unsigned int cpu,
|
||||
spin_lock(&cpu_lock);
|
||||
|
||||
/* Set CPU boot address */
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
cpucfg_membase + CPUCFG_PRIVATE0_REG);
|
||||
|
||||
/* Assert the CPU core in reset */
|
||||
|
@ -5,7 +5,7 @@
|
||||
|
||||
static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
tango_set_aux_boot_addr(virt_to_phys(secondary_startup));
|
||||
tango_set_aux_boot_addr(__pa_symbol(secondary_startup));
|
||||
tango_start_aux_core(cpu);
|
||||
return 0;
|
||||
}
|
||||
|
@ -5,7 +5,7 @@
|
||||
|
||||
static int tango_pm_powerdown(unsigned long arg)
|
||||
{
|
||||
tango_suspend(virt_to_phys(cpu_resume));
|
||||
tango_suspend(__pa_symbol(cpu_resume));
|
||||
|
||||
return -EIO; /* tango_suspend has failed */
|
||||
}
|
||||
|
@ -94,14 +94,14 @@ void __init tegra_cpu_reset_handler_init(void)
|
||||
__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
|
||||
*((u32 *)cpu_possible_mask);
|
||||
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
|
||||
virt_to_phys((void *)secondary_startup);
|
||||
__pa_symbol((void *)secondary_startup);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] =
|
||||
TEGRA_IRAM_LPx_RESUME_AREA;
|
||||
__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] =
|
||||
virt_to_phys((void *)tegra_resume);
|
||||
__pa_symbol((void *)tegra_resume);
|
||||
#endif
|
||||
|
||||
tegra_cpu_reset_handler_enable();
|
||||
|
@ -54,7 +54,7 @@ static void wakeup_secondary(void)
|
||||
* backup ram register at offset 0x1FF0, which is what boot rom code
|
||||
* is waiting for. This will wake up the secondary core from WFE.
|
||||
*/
|
||||
writel(virt_to_phys(secondary_startup),
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
backupram + UX500_CPU1_JUMPADDR_OFFSET);
|
||||
writel(0xA1FEED01,
|
||||
backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
|
||||
|
@ -166,7 +166,7 @@ static int __init dcscb_init(void)
|
||||
* Future entries into the kernel can now go
|
||||
* through the cluster entry vectors.
|
||||
*/
|
||||
vexpress_flags_set(virt_to_phys(mcpm_entry_point));
|
||||
vexpress_flags_set(__pa_symbol(mcpm_entry_point));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -79,7 +79,7 @@ static void __init vexpress_smp_dt_prepare_cpus(unsigned int max_cpus)
|
||||
* until it receives a soft interrupt, and then the
|
||||
* secondary CPU branches to this address.
|
||||
*/
|
||||
vexpress_flags_set(virt_to_phys(versatile_secondary_startup));
|
||||
vexpress_flags_set(__pa_symbol(versatile_secondary_startup));
|
||||
}
|
||||
|
||||
const struct smp_operations vexpress_smp_dt_ops __initconst = {
|
||||
|
@ -54,7 +54,7 @@ static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
|
||||
if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
|
||||
return -EINVAL;
|
||||
ve_spc_set_resume_addr(cluster, cpu,
|
||||
virt_to_phys(mcpm_entry_point));
|
||||
__pa_symbol(mcpm_entry_point));
|
||||
ve_spc_cpu_wakeup_irq(cluster, cpu, true);
|
||||
return 0;
|
||||
}
|
||||
@ -159,7 +159,7 @@ static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
|
||||
|
||||
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
|
||||
{
|
||||
ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point));
|
||||
ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
|
||||
}
|
||||
|
||||
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
|
||||
|
@ -76,7 +76,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus)
|
||||
* until it receives a soft interrupt, and then the
|
||||
* secondary CPU branches to this address.
|
||||
*/
|
||||
__raw_writel(virt_to_phys(zx_secondary_startup),
|
||||
__raw_writel(__pa_symbol(zx_secondary_startup),
|
||||
aonsysctrl_base + AON_SYS_CTRL_RESERVED1);
|
||||
|
||||
iounmap(aonsysctrl_base);
|
||||
@ -94,7 +94,7 @@ void __init zx_smp_prepare_cpus(unsigned int max_cpus)
|
||||
|
||||
/* Map the first 4 KB IRAM for suspend usage */
|
||||
sys_iram = __arm_ioremap_exec(ZX_IRAM_BASE, PAGE_SIZE, false);
|
||||
zx_secondary_startup_pa = virt_to_phys(zx_secondary_startup);
|
||||
zx_secondary_startup_pa = __pa_symbol(zx_secondary_startup);
|
||||
fncpy(sys_iram, &zx_resume_jump, zx_suspend_iram_sz);
|
||||
}
|
||||
|
||||
|
@ -89,7 +89,7 @@ EXPORT_SYMBOL(zynq_cpun_start);
|
||||
|
||||
static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
|
||||
return zynq_cpun_start(__pa_symbol(secondary_startup), cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -29,6 +29,7 @@ config CPU_ARM720T
|
||||
select CPU_COPY_V4WT if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WT if MMU
|
||||
help
|
||||
A 32-bit RISC processor with 8kByte Cache, Write Buffer and
|
||||
@ -46,6 +47,7 @@ config CPU_ARM740T
|
||||
select CPU_CACHE_V4
|
||||
select CPU_CP15_MPU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
help
|
||||
A 32-bit RISC processor with 8KB cache or 4KB variants,
|
||||
write buffer and MPU(Protection Unit) built around
|
||||
@ -79,6 +81,7 @@ config CPU_ARM920T
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM920T is licensed to be produced by numerous vendors,
|
||||
@ -97,6 +100,7 @@ config CPU_ARM922T
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM922T is a version of the ARM920T, but with smaller
|
||||
@ -116,6 +120,7 @@ config CPU_ARM925T
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM925T is a mix between the ARM920T and ARM926T, but with
|
||||
@ -134,6 +139,7 @@ config CPU_ARM926T
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
This is a variant of the ARM920. It has slightly different
|
||||
@ -170,6 +176,7 @@ config CPU_ARM940T
|
||||
select CPU_CACHE_VIVT
|
||||
select CPU_CP15_MPU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
help
|
||||
ARM940T is a member of the ARM9TDMI family of general-
|
||||
purpose microprocessors with MPU and separate 4KB
|
||||
@ -188,6 +195,7 @@ config CPU_ARM946E
|
||||
select CPU_CACHE_VIVT
|
||||
select CPU_CP15_MPU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
help
|
||||
ARM946E-S is a member of the ARM9E-S family of high-
|
||||
performance, 32-bit system-on-chip processor solutions.
|
||||
@ -206,6 +214,7 @@ config CPU_ARM1020
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM1020 is the 32K cached version of the ARM10 processor,
|
||||
@ -225,6 +234,7 @@ config CPU_ARM1020E
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
|
||||
# ARM1022E
|
||||
@ -236,6 +246,7 @@ config CPU_ARM1022
|
||||
select CPU_COPY_V4WB if MMU # can probably do better
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM1022E is an implementation of the ARMv5TE architecture
|
||||
@ -254,6 +265,7 @@ config CPU_ARM1026
|
||||
select CPU_COPY_V4WB if MMU # can probably do better
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
help
|
||||
The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture
|
||||
@ -302,6 +314,7 @@ config CPU_XSCALE
|
||||
select CPU_CACHE_VIVT
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
|
||||
# XScale Core Version 3
|
||||
@ -312,6 +325,7 @@ config CPU_XSC3
|
||||
select CPU_CACHE_VIVT
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
select IO_36
|
||||
|
||||
@ -324,6 +338,7 @@ config CPU_MOHAWK
|
||||
select CPU_COPY_V4WB if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V4WBI if MMU
|
||||
|
||||
# Feroceon
|
||||
@ -335,6 +350,7 @@ config CPU_FEROCEON
|
||||
select CPU_COPY_FEROCEON if MMU
|
||||
select CPU_CP15_MMU
|
||||
select CPU_PABRT_LEGACY
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_FEROCEON if MMU
|
||||
|
||||
config CPU_FEROCEON_OLD_ID
|
||||
@ -367,6 +383,7 @@ config CPU_V6
|
||||
select CPU_CP15_MMU
|
||||
select CPU_HAS_ASID if MMU
|
||||
select CPU_PABRT_V6
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V6 if MMU
|
||||
|
||||
# ARMv6k
|
||||
@ -381,6 +398,7 @@ config CPU_V6K
|
||||
select CPU_CP15_MMU
|
||||
select CPU_HAS_ASID if MMU
|
||||
select CPU_PABRT_V6
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V6 if MMU
|
||||
|
||||
# ARMv7
|
||||
@ -396,6 +414,7 @@ config CPU_V7
|
||||
select CPU_CP15_MPU if !MMU
|
||||
select CPU_HAS_ASID if MMU
|
||||
select CPU_PABRT_V7
|
||||
select CPU_THUMB_CAPABLE
|
||||
select CPU_TLB_V7 if MMU
|
||||
|
||||
# ARMv7M
|
||||
@ -410,11 +429,17 @@ config CPU_V7M
|
||||
|
||||
config CPU_THUMBONLY
|
||||
bool
|
||||
select CPU_THUMB_CAPABLE
|
||||
# There are no CPUs available with MMU that don't implement an ARM ISA:
|
||||
depends on !MMU
|
||||
help
|
||||
Select this if your CPU doesn't support the 32 bit ARM instructions.
|
||||
|
||||
config CPU_THUMB_CAPABLE
|
||||
bool
|
||||
help
|
||||
Select this if your CPU can support Thumb mode.
|
||||
|
||||
# Figure out what processor architecture version we should be using.
|
||||
# This defines the compiler instruction set which depends on the machine type.
|
||||
config CPU_32v3
|
||||
@ -655,11 +680,7 @@ config ARCH_DMA_ADDR_T_64BIT
|
||||
|
||||
config ARM_THUMB
|
||||
bool "Support Thumb user binaries" if !CPU_THUMBONLY
|
||||
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \
|
||||
CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \
|
||||
CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
|
||||
CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \
|
||||
CPU_V7 || CPU_FEROCEON || CPU_V7M
|
||||
depends on CPU_THUMB_CAPABLE
|
||||
default y
|
||||
help
|
||||
Say Y if you want to include kernel support for running user space
|
||||
|
@ -14,6 +14,7 @@ endif
|
||||
|
||||
obj-$(CONFIG_ARM_PTDUMP) += dump.o
|
||||
obj-$(CONFIG_MODULES) += proc-syms.o
|
||||
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
|
||||
|
||||
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
|
||||
obj-$(CONFIG_HIGHMEM) += highmem.o
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#define pr_fmt(fmt) "uniphier: " fmt
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/log2.h>
|
||||
@ -71,8 +72,7 @@
|
||||
* @ctrl_base: virtual base address of control registers
|
||||
* @rev_base: virtual base address of revision registers
|
||||
* @op_base: virtual base address of operation registers
|
||||
* @way_present_mask: each bit specifies if the way is present
|
||||
* @way_locked_mask: each bit specifies if the way is locked
|
||||
* @way_mask: each bit specifies if the way is present
|
||||
* @nsets: number of associativity sets
|
||||
* @line_size: line size in bytes
|
||||
* @range_op_max_size: max size that can be handled by a single range operation
|
||||
@ -83,8 +83,7 @@ struct uniphier_cache_data {
|
||||
void __iomem *rev_base;
|
||||
void __iomem *op_base;
|
||||
void __iomem *way_ctrl_base;
|
||||
u32 way_present_mask;
|
||||
u32 way_locked_mask;
|
||||
u32 way_mask;
|
||||
u32 nsets;
|
||||
u32 line_size;
|
||||
u32 range_op_max_size;
|
||||
@ -234,17 +233,13 @@ static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
|
||||
writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
|
||||
}
|
||||
|
||||
static void __init __uniphier_cache_set_locked_ways(
|
||||
struct uniphier_cache_data *data,
|
||||
u32 way_mask)
|
||||
static void __init __uniphier_cache_set_active_ways(
|
||||
struct uniphier_cache_data *data)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
data->way_locked_mask = way_mask & data->way_present_mask;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
writel_relaxed(~data->way_locked_mask & data->way_present_mask,
|
||||
data->way_ctrl_base + 4 * cpu);
|
||||
writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu);
|
||||
}
|
||||
|
||||
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
|
||||
@ -307,7 +302,7 @@ static void __init uniphier_cache_enable(void)
|
||||
|
||||
list_for_each_entry(data, &uniphier_cache_list, list) {
|
||||
__uniphier_cache_enable(data, true);
|
||||
__uniphier_cache_set_locked_ways(data, 0);
|
||||
__uniphier_cache_set_active_ways(data);
|
||||
}
|
||||
}
|
||||
|
||||
@ -382,8 +377,8 @@ static int __init __uniphier_cache_init(struct device_node *np,
|
||||
goto err;
|
||||
}
|
||||
|
||||
data->way_present_mask =
|
||||
((u32)1 << cache_size / data->nsets / data->line_size) - 1;
|
||||
data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1,
|
||||
0);
|
||||
|
||||
data->ctrl_base = of_iomap(np, 0);
|
||||
if (!data->ctrl_base) {
|
||||
|
@ -868,6 +868,9 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
|
||||
vma->vm_end - vma->vm_start,
|
||||
vma->vm_page_prot);
|
||||
}
|
||||
#else
|
||||
ret = vm_iomap_memory(vma, vma->vm_start,
|
||||
(vma->vm_end - vma->vm_start));
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
return ret;
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/fixmap.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
struct addr_marker {
|
||||
@ -31,8 +32,8 @@ static struct addr_marker address_markers[] = {
|
||||
{ 0, "vmalloc() Area" },
|
||||
{ VMALLOC_END, "vmalloc() End" },
|
||||
{ FIXADDR_START, "Fixmap Area" },
|
||||
{ CONFIG_VECTORS_BASE, "Vectors" },
|
||||
{ CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
|
||||
{ VECTORS_BASE, "Vectors" },
|
||||
{ VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" },
|
||||
{ -1, NULL },
|
||||
};
|
||||
|
||||
|
@ -327,6 +327,12 @@ void flush_dcache_page(struct page *page)
|
||||
if (page == ZERO_PAGE(0))
|
||||
return;
|
||||
|
||||
if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
|
||||
if (test_bit(PG_dcache_clean, &page->flags))
|
||||
clear_bit(PG_dcache_clean, &page->flags);
|
||||
return;
|
||||
}
|
||||
|
||||
mapping = page_mapping(page);
|
||||
|
||||
if (!cache_ops_need_broadcast() &&
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include <asm/cp15.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/memblock.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
@ -227,41 +228,59 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
|
||||
return phys;
|
||||
}
|
||||
|
||||
void __init arm_memblock_init(const struct machine_desc *mdesc)
|
||||
static void __init arm_initrd_init(void)
|
||||
{
|
||||
/* Register the kernel text, kernel data and initrd with memblock. */
|
||||
#ifdef CONFIG_XIP_KERNEL
|
||||
memblock_reserve(__pa(_sdata), _end - _sdata);
|
||||
#else
|
||||
memblock_reserve(__pa(_stext), _end - _stext);
|
||||
#endif
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
phys_addr_t start;
|
||||
unsigned long size;
|
||||
|
||||
/* FDT scan will populate initrd_start */
|
||||
if (initrd_start && !phys_initrd_size) {
|
||||
phys_initrd_start = __virt_to_phys(initrd_start);
|
||||
phys_initrd_size = initrd_end - initrd_start;
|
||||
}
|
||||
initrd_start = initrd_end = 0;
|
||||
if (phys_initrd_size &&
|
||||
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
|
||||
(u64)phys_initrd_start, phys_initrd_size);
|
||||
phys_initrd_start = phys_initrd_size = 0;
|
||||
}
|
||||
if (phys_initrd_size &&
|
||||
memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
|
||||
(u64)phys_initrd_start, phys_initrd_size);
|
||||
phys_initrd_start = phys_initrd_size = 0;
|
||||
}
|
||||
if (phys_initrd_size) {
|
||||
memblock_reserve(phys_initrd_start, phys_initrd_size);
|
||||
|
||||
/* Now convert initrd to virtual addresses */
|
||||
initrd_start = __phys_to_virt(phys_initrd_start);
|
||||
initrd_end = initrd_start + phys_initrd_size;
|
||||
initrd_start = initrd_end = 0;
|
||||
|
||||
if (!phys_initrd_size)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Round the memory region to page boundaries as per free_initrd_mem()
|
||||
* This allows us to detect whether the pages overlapping the initrd
|
||||
* are in use, but more importantly, reserves the entire set of pages
|
||||
* as we don't want these pages allocated for other purposes.
|
||||
*/
|
||||
start = round_down(phys_initrd_start, PAGE_SIZE);
|
||||
size = phys_initrd_size + (phys_initrd_start - start);
|
||||
size = round_up(size, PAGE_SIZE);
|
||||
|
||||
if (!memblock_is_region_memory(start, size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
|
||||
(u64)start, size);
|
||||
return;
|
||||
}
|
||||
|
||||
if (memblock_is_region_reserved(start, size)) {
|
||||
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
|
||||
(u64)start, size);
|
||||
return;
|
||||
}
|
||||
|
||||
memblock_reserve(start, size);
|
||||
|
||||
/* Now convert initrd to virtual addresses */
|
||||
initrd_start = __phys_to_virt(phys_initrd_start);
|
||||
initrd_end = initrd_start + phys_initrd_size;
|
||||
#endif
|
||||
}
|
||||
|
||||
void __init arm_memblock_init(const struct machine_desc *mdesc)
|
||||
{
|
||||
/* Register the kernel text, kernel data and initrd with memblock. */
|
||||
memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);
|
||||
|
||||
arm_initrd_init();
|
||||
|
||||
arm_mm_memblock_reserve();
|
||||
|
||||
@ -521,8 +540,7 @@ void __init mem_init(void)
|
||||
" .data : 0x%p" " - 0x%p" " (%4td kB)\n"
|
||||
" .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
|
||||
|
||||
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
|
||||
(PAGE_SIZE)),
|
||||
MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
|
||||
#ifdef CONFIG_HAVE_TCM
|
||||
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
|
||||
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
|
||||
|
@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc);

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t memblock_limit = 0;
	int highmem = 0;
	u64 vmalloc_limit;
	struct memblock_region *reg;
	bool should_use_highmem = false;
	phys_addr_t lowmem_limit = 0;

	/*
	 * Let's use our own (unoptimized) equivalent of __pa() that is
@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void)
	for_each_memblock(memory, reg) {
		phys_addr_t block_start = reg->base;
		phys_addr_t block_end = reg->base + reg->size;
		phys_addr_t size_limit = reg->size;

		if (reg->base >= vmalloc_limit)
			highmem = 1;
		else
			size_limit = vmalloc_limit - reg->base;

		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {

			if (highmem) {
				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
					  &block_start, &block_end);
				memblock_remove(reg->base, reg->size);
				should_use_highmem = true;
				continue;
			}

			if (reg->size > size_limit) {
				phys_addr_t overlap_size = reg->size - size_limit;

				pr_notice("Truncating RAM at %pa-%pa",
					  &block_start, &block_end);
				block_end = vmalloc_limit;
				pr_cont(" to -%pa", &block_end);
				memblock_remove(vmalloc_limit, overlap_size);
				should_use_highmem = true;
			}
		}

		if (!highmem) {
			if (block_end > arm_lowmem_limit) {
				if (reg->size > size_limit)
					arm_lowmem_limit = vmalloc_limit;
				else
					arm_lowmem_limit = block_end;
			}
		if (reg->base < vmalloc_limit) {
			if (block_end > lowmem_limit)
				/*
				 * Compare as u64 to ensure vmalloc_limit does
				 * not get truncated. block_end should always
				 * fit in phys_addr_t so there should be no
				 * issue with assignment.
				 */
				lowmem_limit = min_t(u64,
						     vmalloc_limit,
						     block_end);

			/*
			 * Find the first non-pmd-aligned page, and point
@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void)
			if (!IS_ALIGNED(block_start, PMD_SIZE))
				memblock_limit = block_start;
			else if (!IS_ALIGNED(block_end, PMD_SIZE))
				memblock_limit = arm_lowmem_limit;
				memblock_limit = lowmem_limit;
			}

		}
	}

	if (should_use_highmem)
		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
	arm_lowmem_limit = lowmem_limit;

	high_memory = __va(arm_lowmem_limit - 1) + 1;

@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void)
	if (!memblock_limit)
		memblock_limit = arm_lowmem_limit;

	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
			phys_addr_t end = memblock_end_of_DRAM();

			pr_notice("Ignoring RAM at %pa-%pa\n",
				  &memblock_limit, &end);
			pr_notice("Consider using a HIGHMEM enabled kernel.\n");

			memblock_remove(memblock_limit, end - memblock_limit);
		}
	}

	memblock_set_current_limit(memblock_limit);
}

@ -1437,11 +1422,7 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
	struct memblock_region *reg;
#ifdef CONFIG_XIP_KERNEL
	phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
#else
	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
#endif
	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	/* Map all the lowmem memory banks. */
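A note on the min_t(u64, ...) above: the comparison is done in 64 bits because vmalloc_limit can sit exactly at 4 GiB, which a 32-bit phys_addr_t would truncate to 0. A standalone sketch of the failure mode it avoids (values are illustrative, not from the commit):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t vmalloc_limit = 0x100000000ULL; /* 4 GiB boundary */
		uint64_t block_end = 0xc0000000ULL;      /* end of a 3 GiB bank */

		/* Truncating first would clamp lowmem to nothing... */
		uint32_t truncated = (uint32_t)vmalloc_limit;         /* 0 */
		/* ...while comparing as u64 keeps the real bound. */
		uint64_t lowmem_limit = block_end < vmalloc_limit ? block_end
								  : vmalloc_limit;

		printf("truncated=%#x lowmem_limit=%#llx\n",
		       truncated, (unsigned long long)lowmem_limit);
		return 0;
	}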
@ -11,6 +11,7 @@
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
@ -22,6 +23,8 @@

#include "mm.h"

unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;

@ -85,7 +88,7 @@ static unsigned long irbar_read(void)
}

/* MPU initialisation functions */
void __init sanity_check_meminfo_mpu(void)
void __init adjust_lowmem_bounds_mpu(void)
{
	phys_addr_t phys_offset = PHYS_OFFSET;
	phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
@ -274,19 +277,64 @@ void __init mpu_setup(void)
	}
}
#else
static void sanity_check_meminfo_mpu(void) {}
static void adjust_lowmem_bounds_mpu(void) {}
static void __init mpu_setup(void) {}
#endif /* CONFIG_ARM_MPU */

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
static unsigned long __init setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values,
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
}

static unsigned long __init setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Register the exception vector page.
	 * On some platforms the exception vectors live in DRAM at address 0;
	 * alloc_page() cannot hand that page out, since its address ("0")
	 * looks like an allocation failure even though it is not NULL, so
	 * reserve it explicitly.
	 */
	memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE);
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M. So nothing needs to be
@ -295,10 +343,10 @@ void __init arm_mm_memblock_reserve(void)
#endif
}

void __init sanity_check_meminfo(void)
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;
	sanity_check_meminfo_mpu();
	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
@ -310,7 +358,7 @@ void __init sanity_check_meminfo(void)
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	early_trap_init((void *)CONFIG_VECTORS_BASE);
	early_trap_init((void *)vectors_base);
	mpu_setup();
	bootmem_init();
}
57	arch/arm/mm/physaddr.c	Normal file
@ -0,0 +1,57 @@
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/memory.h>
#include <asm/fixmap.h>
#include <asm/dma.h>

#include "mm.h"

static inline bool __virt_addr_valid(unsigned long x)
{
	/*
	 * high_memory does not get immediately defined, and there
	 * are early callers of __pa() against PAGE_OFFSET
	 */
	if (!high_memory && x >= PAGE_OFFSET)
		return true;

	if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
		return true;

	/*
	 * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
	 * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS)
	 * that we just need to work around it and always return true.
	 */
	if (x == MAX_DMA_ADDRESS)
		return true;

	return false;
}

phys_addr_t __virt_to_phys(unsigned long x)
{
	WARN(!__virt_addr_valid(x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x, (void *)x);

	return __virt_to_phys_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);

phys_addr_t __phys_addr_symbol(unsigned long x)
{
	/* This is bounds checking against the kernel image only.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START ||
		       x > (unsigned long)KERNEL_END);

	return __pa_symbol_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
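With CONFIG_DEBUG_VIRTUAL enabled, these hooks catch the classic mistake of passing a non-linear address to virt_to_phys(). A hypothetical snippet that would now trigger the WARN (function and variable names are mine, not part of the commit):

	#include <linux/vmalloc.h>
	#include <linux/printk.h>
	#include <asm/memory.h>

	static void debug_virtual_demo(void)
	{
		void *v = vmalloc(PAGE_SIZE);	/* vmalloc area, not the linear map */
		phys_addr_t pa;

		if (!v)
			return;

		pa = virt_to_phys(v);	/* wrong: WARNs under CONFIG_DEBUG_VIRTUAL */
		pr_info("bogus pa: %pa\n", &pa);
		vfree(v);
	}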
65	arch/arm64/include/asm/asm-uaccess.h	Normal file
@ -0,0 +1,65 @@
#ifndef __ASM_ASM_UACCESS_H
#define __ASM_ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
#include <asm/assembler.h>

/*
 * User access enabling/disabling macros.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
	isb
	.endm

	.macro	uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
	__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp2		// avoid preemption
	__uaccess_ttbr0_enable \tmp1
	restore_irq \tmp2
alternative_else_nop_endif
	.endm
#else
	.macro	uaccess_ttbr0_disable, tmp1
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
	.macro	uaccess_disable_not_uao, tmp1
	uaccess_ttbr0_disable \tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
	uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
alternative_else_nop_endif
	.endm

#endif
@ -22,8 +22,6 @@
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

/*
 * User space memory access functions
 */
@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

#else	/* __ASSEMBLY__ */

#include <asm/assembler.h>

/*
 * User access enabling/disabling macros.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	.macro	__uaccess_ttbr0_disable, tmp1
	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
	isb
	.endm

	.macro	__uaccess_ttbr0_enable, tmp1
	get_thread_info \tmp1
	ldr	\tmp1, [\tmp1, #TSK_TI_TTBR0]	// load saved TTBR0_EL1
	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
	isb
	.endm

	.macro	uaccess_ttbr0_disable, tmp1
alternative_if_not ARM64_HAS_PAN
	__uaccess_ttbr0_disable \tmp1
alternative_else_nop_endif
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
alternative_if_not ARM64_HAS_PAN
	save_and_disable_irq \tmp2		// avoid preemption
	__uaccess_ttbr0_enable \tmp1
	restore_irq \tmp2
alternative_else_nop_endif
	.endm
#else
	.macro	uaccess_ttbr0_disable, tmp1
	.endm

	.macro	uaccess_ttbr0_enable, tmp1, tmp2
	.endm
#endif

/*
 * These macros are no-ops when UAO is present.
 */
	.macro	uaccess_disable_not_uao, tmp1
	uaccess_ttbr0_disable \tmp1
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(1)
alternative_else_nop_endif
	.endm

	.macro	uaccess_enable_not_uao, tmp1, tmp2
	uaccess_ttbr0_enable \tmp1, \tmp2
alternative_if ARM64_ALT_PAN_NOT_UAO
	SET_PSTATE_PAN(0)
alternative_else_nop_endif
	.endm

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_UACCESS_H */
@ -31,7 +31,7 @@
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*

@ -17,7 +17,7 @@
 */
#include <linux/linkage.h>

#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>

	.text

@ -17,7 +17,7 @@
#include <linux/linkage.h>

#include <asm/cache.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>

/*
 * Copy from user space to a kernel buffer (alignment handled by the hardware)

@ -19,7 +19,7 @@
#include <linux/linkage.h>

#include <asm/cache.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>

/*
 * Copy from user space to user space (alignment handled by the hardware)

@ -17,7 +17,7 @@
#include <linux/linkage.h>

#include <asm/cache.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>

/*
 * Copy to user space from a kernel buffer (alignment handled by the hardware)

@ -23,7 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>

/*
 * flush_icache_range(start,end)

@ -49,7 +49,7 @@

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <linux/uaccess.h>
#include <asm/asm-uaccess.h>
#include <xen/interface/xen.h>
@ -46,6 +46,7 @@ config X86
	select ARCH_CLOCKSOURCE_DATA
	select ARCH_DISCARD_MEMBLOCK
	select ARCH_HAS_ACPI_TABLE_UPGRADE	if ACPI
	select ARCH_HAS_DEBUG_VIRTUAL
	select ARCH_HAS_DEVMEM_IS_ALLOWED
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FAST_MULTIPLIER
@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}

static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
{
	bool negative;
	asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
		CC_SET(s)
		: CC_OUT(s) (negative), ADDR
		: "ir" ((char) ~(1 << nr)) : "memory");
	return negative;
}

// Let everybody know we have it
#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte

/*
 * __clear_bit_unlock - Clears a bit in memory
 * @nr: Bit to clear
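The locked andb clears the bit and leaves the sign flag describing bit 7 of the same byte, which the CC_SET(s)/CC_OUT(s) pair turns into a bool. A portable sketch of the same contract (my illustration using a compiler builtin, not the committed generic fallback):

	#include <stdbool.h>

	/* Atomically clear bit @nr (expected < 8 here) and report whether
	 * bit 7 of the containing word is still set afterwards -- the
	 * "negative byte" the x86 asm reads out of the sign flag. */
	static inline bool clear_bit_is_negative_byte_sketch(long nr,
							     unsigned long *addr)
	{
		unsigned long old = __sync_fetch_and_and(addr, ~(1UL << nr));

		return ((old & ~(1UL << nr)) >> 7) & 1;
	}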
@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
	const char *name = get_name(bank, NULL);
	int err = 0;

	if (!dev)
		return -ENODEV;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));
@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
	for (i = 0; i < ctcount; i++) {
		unsigned int dlen = COMP_BUF_SIZE;
		int ilen = ctemplate[i].inlen;
		void *input_vec;

		input_vec = kmalloc(ilen, GFP_KERNEL);
		if (!input_vec) {
			ret = -ENOMEM;
			goto out;
		}

		memcpy(input_vec, ctemplate[i].input, ilen);
		memset(output, 0, dlen);
		init_completion(&result.completion);
		sg_init_one(&src, ctemplate[i].input, ilen);
		sg_init_one(&src, input_vec, ilen);
		sg_init_one(&dst, output, dlen);

		req = acomp_request_alloc(tfm);
		if (!req) {
			pr_err("alg: acomp: request alloc failed for %s\n",
			       algo);
			kfree(input_vec);
			ret = -ENOMEM;
			goto out;
		}
@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
		if (ret) {
			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
			       i + 1, algo, -ret);
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
			       i + 1, algo, req->dlen);
			ret = -EINVAL;
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			       i + 1, algo);
			hexdump(output, req->dlen);
			ret = -EINVAL;
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}

		kfree(input_vec);
		acomp_request_free(req);
	}

	for (i = 0; i < dtcount; i++) {
		unsigned int dlen = COMP_BUF_SIZE;
		int ilen = dtemplate[i].inlen;
		void *input_vec;

		input_vec = kmalloc(ilen, GFP_KERNEL);
		if (!input_vec) {
			ret = -ENOMEM;
			goto out;
		}

		memcpy(input_vec, dtemplate[i].input, ilen);
		memset(output, 0, dlen);
		init_completion(&result.completion);
		sg_init_one(&src, dtemplate[i].input, ilen);
		sg_init_one(&src, input_vec, ilen);
		sg_init_one(&dst, output, dlen);

		req = acomp_request_alloc(tfm);
		if (!req) {
			pr_err("alg: acomp: request alloc failed for %s\n",
			       algo);
			kfree(input_vec);
			ret = -ENOMEM;
			goto out;
		}
@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
		if (ret) {
			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
			       i + 1, algo, -ret);
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
			       i + 1, algo, req->dlen);
			ret = -EINVAL;
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			       i + 1, algo);
			hexdump(output, req->dlen);
			ret = -EINVAL;
			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}

		kfree(input_vec);
		acomp_request_free(req);
	}
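The copies above exist because sg_init_one() resolves its buffer through virt_to_page(), which is only valid for linearly mapped memory; the const test vectors live in the kernel image and do not qualify (and with CONFIG_DEBUG_VIRTUAL would trip the new checks). A minimal sketch of the rule (the helper name is mine):

	/* Copy constant data into linear-map-backed memory before wiring it
	 * into a scatterlist; the caller owns (and must kfree) the buffer. */
	static int sg_from_const(struct scatterlist *sg, const void *vec, size_t len)
	{
		void *buf = kmalloc(len, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		memcpy(buf, vec, len);
		sg_init_one(sg, buf, len);	/* virt_to_page(buf) is valid */
		return 0;
	}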
@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
#define CESA_TDMA_SRC_IN_SRAM			BIT(30)
#define CESA_TDMA_END_OF_REQ			BIT(29)
#define CESA_TDMA_BREAK_CHAIN			BIT(28)
#define CESA_TDMA_TYPE_MSK			GENMASK(27, 0)
#define CESA_TDMA_SET_STATE			BIT(27)
#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
#define CESA_TDMA_DUMMY				0
#define CESA_TDMA_DATA				1
#define CESA_TDMA_OP				2
@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
	sreq->offset = 0;
}

static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}

static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

		if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
		/*
		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
		 * the last element of the current chain, or if the request
		 * being queued needs the IV regs to be set before launching
		 * the request.
		 */
		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = dreq->chain.first->cur_dma;
	}
}
@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
	u32 *state = __this_cpu_read(psci_power_state);

	return psci_ops.cpu_suspend(state[index - 1],
				    virt_to_phys(cpu_resume));
				    __pa_symbol(cpu_resume));
}

int psci_cpu_suspend_enter(unsigned long index)
@ -75,18 +75,18 @@ static char module_name[] = "lart";

/* blob */
#define NUM_BLOB_BLOCKS		FLASH_NUMBLOCKS_16m_PARAM
#define BLOB_START		0x00000000
#define BLOB_LEN		(NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)
#define PART_BLOB_START		0x00000000
#define PART_BLOB_LEN		(NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM)

/* kernel */
#define NUM_KERNEL_BLOCKS	7
#define KERNEL_START		(BLOB_START + BLOB_LEN)
#define KERNEL_LEN		(NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)
#define PART_KERNEL_START	(PART_BLOB_START + PART_BLOB_LEN)
#define PART_KERNEL_LEN		(NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN)

/* initial ramdisk */
#define NUM_INITRD_BLOCKS	24
#define INITRD_START		(KERNEL_START + KERNEL_LEN)
#define INITRD_LEN		(NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)
#define PART_INITRD_START	(PART_KERNEL_START + PART_KERNEL_LEN)
#define PART_INITRD_LEN		(NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN)

/*
 * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet
@ -587,20 +587,20 @@ static struct mtd_partition lart_partitions[] = {
	/* blob */
	{
		.name	= "blob",
		.offset	= BLOB_START,
		.size	= BLOB_LEN,
		.offset	= PART_BLOB_START,
		.size	= PART_BLOB_LEN,
	},
	/* kernel */
	{
		.name	= "kernel",
		.offset	= KERNEL_START,		/* MTDPART_OFS_APPEND */
		.size	= KERNEL_LEN,
		.offset	= PART_KERNEL_START,	/* MTDPART_OFS_APPEND */
		.size	= PART_KERNEL_LEN,
	},
	/* initial ramdisk / file system */
	{
		.name	= "file system",
		.offset	= INITRD_START,		/* MTDPART_OFS_APPEND */
		.size	= INITRD_LEN,		/* MTDPART_SIZ_FULL */
		.offset	= PART_INITRD_START,	/* MTDPART_OFS_APPEND */
		.size	= PART_INITRD_LEN,	/* MTDPART_SIZ_FULL */
	}
};
#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
		   DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
		   &lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	napi_disable(&lp->napi);

	korina_free_ring(dev);

	if (korina_init(dev) < 0) {
		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
		return;
@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
	writel(tmp, &lp->rx_dma_regs->dmasm);

	korina_free_ring(dev);

	napi_disable(&lp->napi);

	cancel_work_sync(&lp->restart_task);

	korina_free_ring(dev);

	free_irq(lp->rx_irq, dev);
	free_irq(lp->tx_irq, dev);
	free_irq(lp->ovr_irq, dev);
@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev)

	/* Configure tx cq's and rings */
	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
		u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
		u8 num_tx_rings_p_up = t == TX ?
			priv->num_tx_rings_p_up : priv->tx_ring_num[t];

		for (i = 0; i < priv->tx_ring_num[t]; i++) {
			/* Configure cq */
@ -326,6 +326,7 @@ enum cfg_version {
static const struct pci_device_id rtl8169_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
	unsigned int mii_address = priv->hw->mii.addr;
	unsigned int mii_data = priv->hw->mii.data;

	u32 value = MII_WRITE | MII_BUSY;
	u32 value = MII_BUSY;

	value |= (phyaddr << priv->hw->mii.addr_shift)
		& priv->hw->mii.addr_mask;
@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
		& priv->hw->mii.clk_csr_mask;
	if (priv->plat->has_gmac4)
		value |= MII_GMAC4_WRITE;
	else
		value |= MII_WRITE;

	/* Wait until any existing MII operation is complete */
	if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
@ -99,6 +99,11 @@ struct ipvl_port {
	int count;
};

struct ipvl_skb_cb {
	bool tx_pkt;
};
#define IPVL_SKB_CB(_skb) ((struct ipvl_skb_cb *)&((_skb)->cb[0]))

static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
{
	return rcu_dereference(d->rx_handler_data);
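IPVL_SKB_CB() overlays a private struct on the skb->cb scratch area (48 bytes, owned by whichever layer currently holds the skb). A hypothetical pair of call sites (illustrative only, not part of this hunk):

	static void ipvlan_mark_tx_sketch(struct sk_buff *skb)
	{
		/* The overlay must fit in skb->cb. */
		BUILD_BUG_ON(sizeof(struct ipvl_skb_cb) > sizeof(skb->cb));
		IPVL_SKB_CB(skb)->tx_pkt = true;
	}

	static bool ipvlan_was_tx_sketch(struct sk_buff *skb)
	{
		return IPVL_SKB_CB(skb)->tx_pkt;
	}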