LoongArch: Make {virt, phys, page, pfn} translation work with KFENCE

KFENCE changes virt_to_page() to be able to translate TLB-mapped virtual
addresses, but forgot to change virt_to_phys()/phys_to_virt() and the
other translation functions as well. This patch fixes that; otherwise
some drivers (such as nvme and virtio-blk) cannot work with KFENCE.
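For illustration, a hypothetical minimal sketch of the pattern that breaks
(not actual nvme or virtio-blk code): any driver that feeds a kmalloc()
buffer to virt_to_phys() can receive a KFENCE-pool object, which lives in a
TLB-mapped region that plain __pa() arithmetic does not cover.

	void *buf = kmalloc(512, GFP_KERNEL);	/* may be served from the KFENCE pool */
	phys_addr_t pa = virt_to_phys(buf);	/* old __pa()-only macro yields a bogus
						 * physical address for pool objects */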

All {virt, phys, page, pfn} translation functions are updated:
1. virt_to_pfn()/pfn_to_virt();
2. virt_to_page()/page_to_virt();
3. virt_to_phys()/phys_to_virt().

DMW-mapped and TLB-mapped addresses are distinguished by comparing the
virtual address with vm_map_base in the virt_to_xyz() direction, and we
define WANT_PAGE_VIRTUAL in the KFENCE case for the reverse translations,
xyz_to_virt().
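For illustration, the two directions written out as functions (a sketch
only, equivalent to the macros added below, not code from the patch):

static phys_addr_t example_virt_to_phys(unsigned long kaddr)
{
	if (kaddr < vm_map_base)	/* DMW-mapped: pure address arithmetic */
		return __pa(kaddr);
	/* TLB-mapped (KFENCE pool): consult the kernel page table */
	return page_to_phys(tlb_virt_to_page(kaddr)) + offset_in_page(kaddr);
}

static void *example_phys_to_virt(phys_addr_t paddr)
{
	extern char *__kfence_pool;

	if (__kfence_pool == NULL)	/* pool not set up yet: linear mapping */
		return __va(paddr);
	/* WANT_PAGE_VIRTUAL: page_address() returns the recorded vaddr */
	return page_address(phys_to_page(paddr)) + offset_in_page(paddr);
}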

Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Author: Huacai Chen
Date:   2024-04-10 21:08:51 +08:00
Commit: 0ca84aeaee (parent: 0871bc0129)

4 changed files with 51 additions and 8 deletions

arch/loongarch/include/asm/io.h

@@ -14,11 +14,6 @@
 #include <asm/pgtable-bits.h>
 #include <asm/string.h>
 
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
 extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
 extern void __init early_iounmap(void __iomem *addr, unsigned long size);
@@ -73,6 +68,21 @@ extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t
 #define __io_aw() mmiowb()
 
+#ifdef CONFIG_KFENCE
+#define virt_to_phys(kaddr)	\
+({	\
+	(likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) :	\
+	page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
+})
+
+#define phys_to_virt(paddr)	\
+({	\
+	extern char *__kfence_pool;	\
+	(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) :	\
+	page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
+})
+#endif
+
 #include <asm-generic/io.h>
 
 #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
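Taken together with the set_page_address() loop added in kfence.h below,
these macros keep translations stable on the TLB-mapped side. A sketch of
the round trip, assuming p is a struct page backing an in-use KFENCE object:

	void *v = page_address(p);		/* TLB-mapped KFENCE vaddr, >= vm_map_base */
	phys_addr_t pa = virt_to_phys(v);	/* resolved via tlb_virt_to_page() */
	void *v2 = phys_to_virt(pa);		/* equals v again, not the DMW alias */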

arch/loongarch/include/asm/kfence.h

@@ -16,6 +16,7 @@
 static inline bool arch_kfence_init_pool(void)
 {
 	int err;
+	char *kaddr, *vaddr;
 	char *kfence_pool = __kfence_pool;
 	struct vm_struct *area;
@@ -35,6 +36,14 @@ static inline bool arch_kfence_init_pool(void)
 		return false;
 	}
 
+	kaddr = kfence_pool;
+	vaddr = __kfence_pool;
+	while (kaddr < kfence_pool + KFENCE_POOL_SIZE) {
+		set_page_address(virt_to_page(kaddr), vaddr);
+		kaddr += PAGE_SIZE;
+		vaddr += PAGE_SIZE;
+	}
+
 	return true;
 }
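The loop relies on WANT_PAGE_VIRTUAL (defined in page.h below): each
struct page then carries a cached 'virtual' field, and set_page_address()/
page_address() reduce to plain accessors, roughly as in this model (see
include/linux/mm.h for the real definitions):

static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;	/* cache the kernel virtual address */
}

static inline void *page_address(const struct page *page)
{
	return page->virtual;		/* for pool pages: the stored KFENCE vaddr */
}

So after arch_kfence_init_pool(), every pool page reports its TLB-mapped
KFENCE address rather than its DMW alias.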

arch/loongarch/include/asm/page.h

@@ -78,7 +78,26 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 struct page *dmw_virt_to_page(unsigned long kaddr);
 struct page *tlb_virt_to_page(unsigned long kaddr);
 
-#define virt_to_pfn(kaddr)	PFN_DOWN(PHYSADDR(kaddr))
+#define pfn_to_phys(pfn)	__pfn_to_phys(pfn)
+#define phys_to_pfn(paddr)	__phys_to_pfn(paddr)
+
+#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))
+#define phys_to_page(paddr)	pfn_to_page(phys_to_pfn(paddr))
+
+#ifndef CONFIG_KFENCE
+
+#define page_to_virt(page)	__va(page_to_phys(page))
+#define virt_to_page(kaddr)	phys_to_page(__pa(kaddr))
+
+#else
+
+#define WANT_PAGE_VIRTUAL
+
+#define page_to_virt(page)	\
+({	\
+	extern char *__kfence_pool;	\
+	(__kfence_pool == NULL) ? __va(page_to_phys(page)) : page_address(page);	\
+})
+
 #define virt_to_page(kaddr)	\
 ({	\
@@ -86,6 +105,11 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
 	(likely((unsigned long)kaddr < vm_map_base)) ?	\
 	dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
 })
 
+#endif
+
+#define pfn_to_virt(pfn)	page_to_virt(pfn_to_page(pfn))
+#define virt_to_pfn(kaddr)	page_to_pfn(virt_to_page(kaddr))
+
 extern int __virt_addr_valid(volatile void *kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((volatile void *)(kaddr))
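A worked expansion (illustrative) of the new pfn-level helpers for a
KFENCE-pool address kaddr with CONFIG_KFENCE=y:

	virt_to_pfn(kaddr)
	  => page_to_pfn(virt_to_page(kaddr))
	  => page_to_pfn(tlb_virt_to_page(kaddr))	/* kaddr >= vm_map_base */

and in the reverse direction, once __kfence_pool is set:

	pfn_to_virt(pfn)
	  => page_to_virt(pfn_to_page(pfn))
	  => page_address(pfn_to_page(pfn))		/* the stored KFENCE vaddr */

Both directions therefore inherit the DMW/TLB awareness of virt_to_page()
and page_to_virt() instead of assuming a linear mapping.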

arch/loongarch/mm/pgtable.c

@@ -11,13 +11,13 @@
 
 struct page *dmw_virt_to_page(unsigned long kaddr)
 {
-	return pfn_to_page(virt_to_pfn(kaddr));
+	return phys_to_page(__pa(kaddr));
 }
 EXPORT_SYMBOL(dmw_virt_to_page);
 
 struct page *tlb_virt_to_page(unsigned long kaddr)
 {
-	return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+	return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
 }
 EXPORT_SYMBOL(tlb_virt_to_page);
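A note on why dmw_virt_to_page() is rewritten rather than left alone: with
the new definitions in page.h, keeping its old body would create a cycle,
sketched below for a DMW-mapped kaddr.

	dmw_virt_to_page(kaddr)
	  -> pfn_to_page(virt_to_pfn(kaddr))		/* old body */
	  -> page_to_pfn(virt_to_page(kaddr))		/* new virt_to_pfn() */
	  -> dmw_virt_to_page(kaddr)			/* infinite recursion */

Translating through phys_to_page(__pa(kaddr)) breaks the cycle, since a
DMW address converts to a physical address by pure arithmetic.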