e5080a9677
Every architecture that supports FLATMEM memory model defines its own
version of pfn_valid() that essentially compares a pfn to max_mapnr.

Use mips/powerpc version implemented as static inline as a generic
implementation of pfn_valid() and drop its per-architecture definitions.

[rppt@kernel.org: fix the generic pfn_valid()]
  Link: https://lkml.kernel.org/r/Y9lg7R1Yd931C+y5@kernel.org
Link: https://lkml.kernel.org/r/20230129124235.209895-5-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Guo Ren <guoren@kernel.org> [csky]
Acked-by: Huacai Chen <chenhuacai@loongson.cn> [LoongArch]
Acked-by: Stafford Horne <shorne@gmail.com> [OpenRISC]
Acked-by: Michael Ellerman <mpe@ellerman.id.au> [powerpc]
Reviewed-by: David Hildenbrand <david@redhat.com>
Tested-by: Conor Dooley <conor.dooley@microchip.com>
Cc: Brian Cain <bcain@quicinc.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
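For reference, a minimal sketch of the kind of generic FLATMEM pfn_valid() the
commit message describes, modeled on the mips/powerpc static inline that
compares a pfn against max_mapnr. This is an illustration only; the exact
upstream definition (in include/asm-generic/memory_model.h) may differ in
detail.

        #ifndef pfn_valid
        static inline int pfn_valid(unsigned long pfn)
        {
                /* avoid <linux/mm.h> include hell */
                extern unsigned long max_mapnr;
                unsigned long pfn_offset = ARCH_PFN_OFFSET;

                return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;
        }
        #define pfn_valid pfn_valid
        #endif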
103 lines
2.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PAGE_64_H
#define _ASM_X86_PAGE_64_H

#include <asm/page_64_types.h>

#ifndef __ASSEMBLY__
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

#include <linux/kmsan-checks.h>

/* duplicated to the one in bootmem.h */
extern unsigned long max_pfn;
extern unsigned long phys_base;

extern unsigned long page_offset_base;
extern unsigned long vmalloc_base;
extern unsigned long vmemmap_base;

static __always_inline unsigned long __phys_addr_nodebug(unsigned long x)
{
        unsigned long y = x - __START_KERNEL_map;

        /* use the carry flag to determine if x was < __START_KERNEL_map */
        x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));

        return x;
}
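/*
 * Editor's illustration, not part of the upstream header: a branch-based
 * sketch of what the carry-flag trick above computes. __phys_addr_nodebug()
 * accepts both kernel-text addresses (>= __START_KERNEL_map) and direct-map
 * addresses (>= PAGE_OFFSET); the unsigned wrap of "x - __START_KERNEL_map"
 * tells the two cases apart without a conditional branch. The helper name
 * below is hypothetical.
 */
#if 0   /* illustration only */
static __always_inline unsigned long __phys_addr_branchy(unsigned long x)
{
        if (x >= __START_KERNEL_map)            /* kernel text/data mapping */
                return x - __START_KERNEL_map + phys_base;
        else                                    /* direct (linear) mapping */
                return x - PAGE_OFFSET;
}
#endif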
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
extern unsigned long __phys_addr_symbol(unsigned long);
#else
#define __phys_addr(x)          __phys_addr_nodebug(x)
#define __phys_addr_symbol(x) \
        ((unsigned long)(x) - __START_KERNEL_map + phys_base)
#endif

#define __phys_reloc_hide(x)    (x)

void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);

static inline void clear_page(void *page)
{
        /*
         * Clean up KMSAN metadata for the page being cleared. The assembly call
         * below clobbers @page, so we perform unpoisoning before it.
         */
        kmsan_unpoison_memory(page, PAGE_SIZE);
        alternative_call_2(clear_page_orig,
                           clear_page_rep, X86_FEATURE_REP_GOOD,
                           clear_page_erms, X86_FEATURE_ERMS,
                           "=D" (page),
                           "0" (page)
                           : "cc", "memory", "rax", "rcx");
}
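/*
 * Editor's illustration, not part of the upstream header: alternative_call_2()
 * above calls clear_page_orig() by default and is patched at boot to use
 * clear_page_rep() or clear_page_erms() when the CPU advertises
 * X86_FEATURE_REP_GOOD or X86_FEATURE_ERMS. A hypothetical caller simply
 * passes a page-aligned kernel virtual address:
 */
#if 0   /* illustration only; would need <linux/gfp.h> and <linux/mm.h> */
static void clear_one_page_example(void)
{
        struct page *pg = alloc_page(GFP_KERNEL);

        if (pg) {
                clear_page(page_address(pg));
                __free_page(pg);
        }
}
#endif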
void copy_page(void *to, void *from);

#ifdef CONFIG_X86_5LEVEL
/*
 * User space process size. This is the first address outside the user range.
 * There are a few constraints that determine this:
 *
 * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
 * address, then that syscall will enter the kernel with a
 * non-canonical return address, and SYSRET will explode dangerously.
 * We avoid this particular problem by preventing anything
 * from being mapped at the maximum canonical address.
 *
 * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
 * CPUs malfunction if they execute code from the highest canonical page.
 * They'll speculate right off the end of the canonical space, and
 * bad things happen. This is worked around in the same way as the
 * Intel problem.
 *
 * With page table isolation enabled, we map the LDT in ... [stay tuned]
 */
static __always_inline unsigned long task_size_max(void)
{
        unsigned long ret;

        alternative_io("movq %[small],%0","movq %[large],%0",
                       X86_FEATURE_LA57,
                       "=r" (ret),
                       [small] "i" ((1ul << 47)-PAGE_SIZE),
                       [large] "i" ((1ul << 56)-PAGE_SIZE));

        return ret;
}
#endif  /* CONFIG_X86_5LEVEL */
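/*
 * Editor's illustration, not part of the upstream header: the two operands
 * patched in above, assuming PAGE_SIZE == 4096.
 *
 *   4-level paging: (1UL << 47) - PAGE_SIZE = 0x00007ffffffff000
 *   5-level paging: (1UL << 56) - PAGE_SIZE = 0x00fffffffffff000
 *
 * A hypothetical caller could use task_size_max() for a wrap-safe range
 * check; the real kernel goes through access_ok()/TASK_SIZE_MAX instead.
 */
#if 0   /* illustration only */
static inline bool user_range_ok(unsigned long addr, unsigned long len)
{
        unsigned long limit = task_size_max();

        /* reject wrap-around and ranges extending past the user limit */
        return len <= limit && addr <= limit - len;
}
#endif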
#endif  /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_VSYSCALL_EMULATION
# define __HAVE_ARCH_GATE_AREA 1
#endif

#endif /* _ASM_X86_PAGE_64_H */