commit a5edf9815d
KFENCE support was added for ppc32 in commit 90cbac0e995d ("powerpc: Enable KFENCE for PPC32"). Enable KFENCE on the ppc64 architecture with both the hash and radix MMUs. It uses the same mechanism as debug pagealloc to protect/unprotect pages, and all KFENCE kunit tests pass on both MMUs.

KFENCE memory is initially allocated using memblock but is later marked as SLAB allocated. This necessitates the change to __pud_free to ensure that KFENCE pages are freed appropriately (see the sketch below).

Based on previous work by Christophe Leroy and Jordan Niethe.

Signed-off-by: Nicholas Miehlbradt <nicholas@linux.ibm.com>
Reviewed-by: Russell Currey <ruscur@russell.cc>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220926075726.2846-4-nicholas@linux.ibm.com
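The __pud_free() change itself is not part of the header shown below. A minimal sketch of the idea, reconstructed from the description above (the exact condition and comment may differ slightly from the merged diff):

/*
 * Sketch only: early pud pages allocated via memblock can't be freed
 * directly to slab. KFENCE pages have both the reserved and slab flags
 * set, so they must go back via kmem_cache_free() rather than
 * free_reserved_page().
 */
static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	if (PageReserved(page) && !PageSlab(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

In other words, once KFENCE pages are also marked as SLAB allocated, PageReserved() alone no longer identifies memblock-only pages.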
/* SPDX-License-Identifier: GPL-2.0 */

/*
 * powerpc KFENCE support.
 *
 * Copyright (C) 2020 CS GROUP France
 */

#ifndef __ASM_POWERPC_KFENCE_H
#define __ASM_POWERPC_KFENCE_H

#include <linux/mm.h>
#include <asm/pgtable.h>

/* ELF ABI v1 function entry symbols carry a "." prefix. */
#ifdef CONFIG_PPC64_ELF_ABI_V1
#define ARCH_FUNC_PREFIX "."
#endif

/* The KFENCE pool needs no architecture-specific initialisation. */
static inline bool arch_kfence_init_pool(void)
{
	return true;
}

#ifdef CONFIG_PPC64
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	struct page *page = virt_to_page(addr);

	/* Reuse the debug pagealloc mechanism to unmap/map the page. */
	__kernel_map_pages(page, 1, !protect);

	return true;
}
#else
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	pte_t *kpte = virt_to_kpte(addr);

	if (protect) {
		/* Clear _PAGE_PRESENT so any access faults, then flush the TLB. */
		pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		/* Restore _PAGE_PRESENT; a non-present PTE cannot be in the TLB. */
		pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
	}

	return true;
}
#endif

#endif /* __ASM_POWERPC_KFENCE_H */
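For reference, the __kernel_map_pages() call in the PPC64 branch above is the same hook debug pagealloc uses. A simplified sketch of how it dispatches between the two MMUs on book3s64, assuming the hash__/radix__ helpers added earlier in this series (the real definition lives in the book3s64 headers):

static inline void __kernel_map_pages(struct page *page, int numpages,
				      int enable)
{
	if (radix_enabled())
		radix__kernel_map_pages(page, numpages, enable);
	else
		hash__kernel_map_pages(page, numpages, enable);
}

Passing enable == 0 unmaps the pages at PAGE_SIZE granularity, which is exactly what kfence_protect_page() relies on to make accesses to protected pages fault.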