Making virt_to_pfn() a static inline taking a strongly typed (const void *) makes the contract of passing a pointer of that type to the function explicit and exposes any misuse of the macro virt_to_pfn() acting polymorphic and accepting many types such as (void *), (uintptr_t) or (unsigned long) as arguments without warnings. Move the virt_to_pfn() and related functions below the declaration of __pa() so it compiles. For symmetry do the same with pfn_to_kaddr(). As the file is included right into the linker file, we need to surround the functions with ifndef __ASSEMBLY__ so we don't cause compilation errors. The conversion moreover exposes the fact that pmd_page_vaddr() was returning an unsigned long rather than a const void * as could be expected, so all the sites defining pmd_page_vaddr() had to be augmented as well. Finally the KVM code in book3s_64_mmu_hv.c was passing an unsigned int to virt_to_phys() so fix that up with a cast so the result compiles. Signed-off-by: Linus Walleij <linus.walleij@linaro.org> [mpe: Fixup kfence.h, simplify pfn_to_kaddr() & pmd_page_vaddr()] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://msgid.link/20230809-virt-to-phys-powerpc-v1-1-12e912a7d439@linaro.org
49 lines
933 B
C
49 lines
933 B
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* powerpc KFENCE support.
|
|
*
|
|
* Copyright (C) 2020 CS GROUP France
|
|
*/
|
|
|
|
#ifndef __ASM_POWERPC_KFENCE_H
|
|
#define __ASM_POWERPC_KFENCE_H
|
|
|
|
#include <linux/mm.h>
|
|
#include <asm/pgtable.h>
|
|
|
|
#ifdef CONFIG_PPC64_ELF_ABI_V1
|
|
#define ARCH_FUNC_PREFIX "."
|
|
#endif
|
|
|
|
static inline bool arch_kfence_init_pool(void)
|
|
{
|
|
return true;
|
|
}
|
|
|
|
#ifdef CONFIG_PPC64
|
|
static inline bool kfence_protect_page(unsigned long addr, bool protect)
|
|
{
|
|
struct page *page = virt_to_page((void *)addr);
|
|
|
|
__kernel_map_pages(page, 1, !protect);
|
|
|
|
return true;
|
|
}
|
|
#else
|
|
static inline bool kfence_protect_page(unsigned long addr, bool protect)
|
|
{
|
|
pte_t *kpte = virt_to_kpte(addr);
|
|
|
|
if (protect) {
|
|
pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
|
|
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
|
|
} else {
|
|
pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
|
|
}
|
|
|
|
return true;
|
|
}
|
|
#endif
|
|
|
|
#endif /* __ASM_POWERPC_KFENCE_H */
|