mm/vmalloc: provide fallback arch huge vmap support functions

If an architecture doesn't support a particular page table level as a huge
vmap page size, allow it to skip defining the support query function for that
level; the generic header now provides a fallback that returns false.
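
For example (a minimal, hypothetical sketch, not taken from any in-tree
header): an architecture that only supports PMD-sized huge vmap mappings
would now provide just this one query in its asm/vmalloc.h, and the p4d/pud
queries would fall back to the generic "return false" stubs in
include/linux/vmalloc.h:

	#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
	/* Only the PMD level is supported; p4d/pud use the generic fallbacks. */
	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
	{
		return true;	/* illustrative value only */
	}
	#endif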

Link: https://lkml.kernel.org/r/20210317062402.533919-11-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Suggested-by: Christoph Hellwig <hch@lst.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ding Tianhong <dingtianhong@huawei.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 6f680e70b6
parent 97dc2a1548
Nicholas Piggin, 2021-04-29 22:58:39 -07:00; committed by Linus Torvalds
4 changed files with 31 additions and 20 deletions

arch/arm64/include/asm/vmalloc.h

@@ -4,11 +4,8 @@
 #include <asm/page.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static inline bool arch_vmap_p4d_supported(pgprot_t prot)
-{
-	return false;
-}
 
+#define arch_vmap_pud_supported arch_vmap_pud_supported
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
@@ -19,11 +16,13 @@ static inline bool arch_vmap_pud_supported(pgprot_t prot)
 	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	/* See arch_vmap_pud_supported() */
 	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
+
 #endif
 
 #endif /* _ASM_ARM64_VMALLOC_H */

arch/powerpc/include/asm/vmalloc.h

@@ -5,21 +5,20 @@
 #include <asm/page.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static inline bool arch_vmap_p4d_supported(pgprot_t prot)
-{
-	return false;
-}
 
+#define arch_vmap_pud_supported arch_vmap_pud_supported
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/* HPT does not cope with large pages in the vmalloc area */
 	return radix_enabled();
 }
 
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return radix_enabled();
 }
+
 #endif
 
 #endif /* _ASM_POWERPC_VMALLOC_H */

arch/x86/include/asm/vmalloc.h

@@ -6,24 +6,21 @@
 #include <asm/pgtable_areas.h>
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static inline bool arch_vmap_p4d_supported(pgprot_t prot)
-{
-	return false;
-}
 
+#ifdef CONFIG_X86_64
+#define arch_vmap_pud_supported arch_vmap_pud_supported
 static inline bool arch_vmap_pud_supported(pgprot_t prot)
 {
-#ifdef CONFIG_X86_64
 	return boot_cpu_has(X86_FEATURE_GBPAGES);
-#else
-	return false;
-#endif
 }
+#endif
 
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return boot_cpu_has(X86_FEATURE_PSE);
 }
+
 #endif
 
 #endif /* _ASM_X86_VMALLOC_H */

include/linux/vmalloc.h

@@ -78,10 +78,26 @@ struct vmap_area {
 	};
 };
 
-#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
-static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
-static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
-static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
+/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
+#ifndef arch_vmap_p4d_supported
+static inline bool arch_vmap_p4d_supported(pgprot_t prot)
+{
+	return false;
+}
+#endif
+
+#ifndef arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+	return false;
+}
+#endif
+
+#ifndef arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+	return false;
+}
 #endif
 
 /*