powerpc/64s: Enable KFENCE on book3s64

KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc:
Enable KFENCE for PPC32").

Enable KFENCE on ppc64 architecture with hash and radix MMUs. It uses
the same mechanism as debug pagealloc to protect/unprotect pages. All
KFENCE kunit tests pass on both MMUs.

KFENCE memory is initially allocated using memblock but is later
marked as SLAB allocated. This necessitates the change to __pud_free
to ensure that the KFENCE pages are freed appropriately.

Based on previous work by Christophe Leroy and Jordan Niethe.

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Reviewed-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com
parent d7902d31cb
commit a5edf9815d
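Most of the hunks below switch debug_pagealloc_enabled() call sites over to
debug_pagealloc_enabled_or_kfence(), so the kernel linear mapping is built at
PAGE_SIZE granularity whenever either feature may be in use. That helper is
not part of this diff; it comes from the existing ppc32 KFENCE support and
lives in arch/powerpc/mm/mmu_decl.h (hence the new <mm/mmu_decl.h> include in
the radix code below). As a rough sketch, not taken from this patch, it
amounts to:

/*
 * Sketch of the helper assumed by the call sites below: treat the
 * linear map as if debug_pagealloc were enabled whenever KFENCE is
 * built in, since KFENCE also needs page-granular protection.
 */
static inline bool debug_pagealloc_enabled_or_kfence(void)
{
	return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
}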
@@ -195,7 +195,7 @@ config PPC
 	select HAVE_ARCH_KASAN			if PPC_RADIX_MMU
 	select HAVE_ARCH_KASAN			if PPC_BOOK3E_64
 	select HAVE_ARCH_KASAN_VMALLOC		if HAVE_ARCH_KASAN
-	select HAVE_ARCH_KFENCE			if PPC_BOOK3S_32 || PPC_8xx || 40x
+	select HAVE_ARCH_KFENCE			if ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
@@ -113,9 +113,11 @@ static inline void __pud_free(pud_t *pud)
 
 	/*
 	 * Early pud pages allocated via memblock allocator
-	 * can't be directly freed to slab
+	 * can't be directly freed to slab. KFENCE pages have
+	 * both reserved and slab flags set so need to be freed
+	 * kmem_cache_free.
 	 */
-	if (PageReserved(page))
+	if (PageReserved(page) && !PageSlab(page))
 		free_reserved_page(page);
 	else
 		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
@@ -1106,7 +1106,7 @@ static inline void vmemmap_remove_mapping(unsigned long start,
 }
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (radix_enabled())
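The hunk above (apparently the book3s/64 pgtable header) only shows the start
of __kernel_map_pages() as diff context. For orientation, the function
presumably goes on to dispatch to the MMU-specific helpers, roughly:

/*
 * Assumed continuation of __kernel_map_pages() (not shown in the
 * diff context above): pick the radix or hash implementation.
 */
static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}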
@@ -11,11 +11,25 @@
 #include <linux/mm.h>
 #include <asm/pgtable.h>
 
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define ARCH_FUNC_PREFIX "."
+#endif
+
 static inline bool arch_kfence_init_pool(void)
 {
 	return true;
 }
 
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	struct page *page = virt_to_page(addr);
+
+	__kernel_map_pages(page, 1, !protect);
+
+	return true;
+}
+#else
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
 	pte_t *kpte = virt_to_kpte(addr);
@@ -29,5 +43,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 
 	return true;
 }
+#endif
 
 #endif /* __ASM_POWERPC_KFENCE_H */
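With the asm/kfence.h changes above, the 64-bit kfence_protect_page() simply
maps or unmaps the page through __kernel_map_pages(), the same hook debug
pagealloc uses. A hypothetical snippet (not from this patch) of the kind of
bug the KFENCE kunit tests then catch:

#include <linux/slab.h>

/*
 * Hypothetical illustration: a KFENCE-backed allocation sits next to
 * a guard page that kfence_protect_page() leaves unmapped, so this
 * out-of-bounds read faults and is reported by KFENCE.
 */
static void kfence_oob_read_demo(void)
{
	char *buf = kmalloc(32, GFP_KERNEL);

	if (!buf)
		return;

	READ_ONCE(buf[32]);	/* one byte past the end of the object */
	kfree(buf);
}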
@@ -424,7 +424,7 @@ repeat:
 			break;
 
 		cond_resched();
-		if (debug_pagealloc_enabled() &&
+		if (debug_pagealloc_enabled_or_kfence() &&
 			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 	}
@@ -773,7 +773,7 @@ static void __init htab_init_page_sizes(void)
 	bool aligned = true;
 	init_hpte_page_sizes();
 
-	if (!debug_pagealloc_enabled()) {
+	if (!debug_pagealloc_enabled_or_kfence()) {
 		/*
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
@@ -1061,7 +1061,7 @@ static void __init htab_initialize(void)
 
 	prot = pgprot_val(PAGE_KERNEL);
 
-	if (debug_pagealloc_enabled()) {
+	if (debug_pagealloc_enabled_or_kfence()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 		linear_map_hash_slots = memblock_alloc_try_nid(
 				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -1980,7 +1980,7 @@ repeat:
 	return slot;
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 static DEFINE_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2053,7 +2053,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 	}
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
 void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				      phys_addr_t first_memblock_size)
@@ -34,6 +34,8 @@
 
 #include <trace/events/thp.h>
 
+#include <mm/mmu_decl.h>
+
 unsigned int mmu_base_pid;
 unsigned long radix_mem_block_size __ro_after_init;
 
@@ -276,7 +278,7 @@ static int __meminit create_physical_mapping(unsigned long start,
 	int psize;
 	unsigned long max_mapping_size = radix_mem_block_size;
 
-	if (debug_pagealloc_enabled())
+	if (debug_pagealloc_enabled_or_kfence())
 		max_mapping_size = PAGE_SIZE;
 
 	start = ALIGN(start, PAGE_SIZE);
@@ -899,7 +901,7 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
 #endif
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long addr;
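radix__kernel_map_pages(), whose preprocessor guard changes in the last hunk,
is introduced by the parent patch in this series rather than here. Assuming it
follows the powerpc set_memory API, it would look roughly like:

/*
 * Sketch (assumption, not part of this diff) of the radix helper the
 * KFENCE/debug_pagealloc path relies on: map or unmap the pages in
 * the linear mapping at page granularity.
 */
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}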