a9ff696160
Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit, and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings.

Doing this is a bit intrusive: virt_to_pfn() requires PHYS_PFN_OFFSET and PAGE_SHIFT to be defined, and these are defined in <asm/page.h>, so that header must be included *before* <asm/memory.h>. The use of macros was obscuring the unclear inclusion order here: the macros would eventually be resolved, but a static inline like this cannot be compiled with unresolved macros.

The naive solution of including <asm/page.h> at the top of <asm/memory.h> does not work, because <asm/memory.h> sometimes includes <asm/page.h> at the end of itself, which would create a confusing inclusion loop. So instead, take the approach of always unconditionally including <asm/page.h> at the end of <asm/memory.h>.

arch/arm uses <asm/memory.h> explicitly in a lot of places; however, it turns out that if we just unconditionally include <asm/memory.h> into <asm/page.h> and switch all inclusions of <asm/memory.h> to <asm/page.h> instead, we enforce the right order and <asm/memory.h> will always have access to the definitions. Put an inclusion guard in place making it impossible to include <asm/memory.h> explicitly.

Link: https://lore.kernel.org/linux-mm/20220701160004.2ffff4e5ab59a55499f4c736@linux-foundation.org/
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
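For illustration, the change described above has roughly the following shape. This is a minimal sketch, not the verbatim patch: the exact inline body and the guard macro name _ASMARM_PAGE_H (taken from arch/arm's <asm/page.h>) are assumptions based on the description.

    /* In <asm/memory.h>: the old polymorphic macro becomes a typed inline.
     * PAGE_OFFSET, PAGE_SHIFT and PHYS_PFN_OFFSET must already be visible,
     * hence the inclusion-order work described above. */
    static inline unsigned long virt_to_pfn(const void *p)
    {
            unsigned long kaddr = (unsigned long)p;

            return ((kaddr - PAGE_OFFSET) >> PAGE_SHIFT) + PHYS_PFN_OFFSET;
    }

    /* At the top of <asm/memory.h>: refuse direct inclusion, so the header
     * can only be reached through <asm/page.h> (guard name assumed). */
    #ifndef _ASMARM_PAGE_H
    #error "Do not include <asm/memory.h> directly, include <asm/page.h>"
    #endif

With the macro, a call like virt_to_pfn(some_unsigned_long) compiled silently; with the typed inline, any non-pointer argument now draws a diagnostic at the call site.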
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/proc-fns.h
 *
 *  Copyright (C) 1997-1999 Russell King
 *  Copyright (C) 2000 Deep Blue Solutions Ltd
 */
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H

#ifdef __KERNEL__

#include <asm/glue-proc.h>
#include <asm/page.h>

#ifndef __ASSEMBLY__

struct mm_struct;

/*
 * Don't change this structure - ASM code relies on it.
 */
struct processor {
	/* MISC
	 * get data abort address/flags
	 */
	void (*_data_abort)(unsigned long pc);
	/*
	 * Retrieve prefetch fault address
	 */
	unsigned long (*_prefetch_abort)(unsigned long lr);
	/*
	 * Set up any processor specifics
	 */
	void (*_proc_init)(void);
	/*
	 * Check for processor bugs
	 */
	void (*check_bugs)(void);
	/*
	 * Disable any processor specifics
	 */
	void (*_proc_fin)(void);
	/*
	 * Special stuff for a reset
	 */
	void (*reset)(unsigned long addr, bool hvc) __attribute__((noreturn));
	/*
	 * Idle the processor
	 */
	int (*_do_idle)(void);
	/*
	 * Processor architecture specific
	 */
	/*
	 * clean a virtual address range from the
	 * D-cache without flushing the cache.
	 */
	void (*dcache_clean_area)(void *addr, int size);

	/*
	 * Set the page table
	 */
	void (*switch_mm)(phys_addr_t pgd_phys, struct mm_struct *mm);
	/*
	 * Set a possibly extended PTE.  Non-extended PTEs should
	 * ignore 'ext'.
	 */
#ifdef CONFIG_ARM_LPAE
	void (*set_pte_ext)(pte_t *ptep, pte_t pte);
#else
	void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
#endif

	/* Suspend/resume */
	unsigned int suspend_size;
	void (*do_suspend)(void *);
	void (*do_resume)(void *);
};

#ifndef MULTI_CPU
static inline void init_proc_vtable(const struct processor *p)
{
}

extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));

/* These three are private to arch/arm/kernel/suspend.c */
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else

extern struct processor processor;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
#include <linux/smp.h>
/*
 * This can't be a per-cpu variable because we need to access it before
 * per-cpu has been initialised. We have a couple of functions that are
 * called in a pre-emptible context, and so can't use smp_processor_id()
 * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
 * function pointers for these are identical across all CPUs.
 */
extern struct processor *cpu_vtable[];
#define PROC_VTABLE(f)		cpu_vtable[smp_processor_id()]->f
#define PROC_TABLE(f)		cpu_vtable[0]->f
static inline void init_proc_vtable(const struct processor *p)
{
	unsigned int cpu = smp_processor_id();
	*cpu_vtable[cpu] = *p;
	WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
		     cpu_vtable[0]->dcache_clean_area);
	WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
		     cpu_vtable[0]->set_pte_ext);
}
#else
#define PROC_VTABLE(f)		processor.f
#define PROC_TABLE(f)		processor.f
static inline void init_proc_vtable(const struct processor *p)
{
	processor = *p;
}
#endif

#define cpu_proc_init		PROC_VTABLE(_proc_init)
#define cpu_check_bugs		PROC_VTABLE(check_bugs)
#define cpu_proc_fin		PROC_VTABLE(_proc_fin)
#define cpu_reset		PROC_VTABLE(reset)
#define cpu_do_idle		PROC_VTABLE(_do_idle)
#define cpu_dcache_clean_area	PROC_TABLE(dcache_clean_area)
#define cpu_set_pte_ext		PROC_TABLE(set_pte_ext)
#define cpu_do_switch_mm	PROC_VTABLE(switch_mm)

/* These two are private to arch/arm/kernel/suspend.c */
#define cpu_do_suspend		PROC_VTABLE(do_suspend)
#define cpu_do_resume		PROC_VTABLE(do_resume)
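/*
 * Illustration (editor's sketch, not part of the original header): under
 * MULTI_CPU a call such as cpu_do_idle() expands to
 * cpu_vtable[smp_processor_id()]->_do_idle(), dispatching through the
 * vtable installed for the current CPU by init_proc_vtable(). By
 * contrast, cpu_dcache_clean_area() and cpu_set_pte_ext() use
 * PROC_TABLE() and always go through cpu_vtable[0], because they can be
 * called in a preemptible context where smp_processor_id() is unusable;
 * init_proc_vtable() warns if those two pointers ever differ across CPUs.
 */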
#endif

extern void cpu_resume(void);

#ifdef CONFIG_MMU

#define cpu_switch_mm(pgd,mm)	cpu_do_switch_mm(virt_to_phys(pgd),mm)

#ifdef CONFIG_ARM_LPAE

#define cpu_get_ttbr(nr)					\
	({							\
		u64 ttbr;					\
		__asm__("mrrc	p15, " #nr ", %Q0, %R0, c2"	\
			: "=r" (ttbr));				\
		ttbr;						\
	})

#define cpu_get_pgd()						\
	({							\
		u64 pg = cpu_get_ttbr(0);			\
		pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1);		\
		(pgd_t *)phys_to_virt(pg);			\
	})
#else
#define cpu_get_pgd()						\
	({							\
		unsigned long pg;				\
		__asm__("mrc	p15, 0, %0, c2, c0, 0"		\
			: "=r" (pg) : : "cc");			\
		pg &= ~0x3fff;					\
		(pgd_t *)phys_to_virt(pg);			\
	})
#endif

#else	/* !CONFIG_MMU */

#define cpu_switch_mm(pgd,mm)	{ }

#endif

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */