LoongArch: mm: Add page table mapped mode support for virt_to_page()
According to the LoongArch documentation, there are two types of address translation modes: direct mapped address translation mode (DMW mode) and page table mapped address translation mode (TLB mode). Currently, virt_to_page() only supports direct mapped mode. This patch determines which mode is used, and adds corresponding handling functions for both modes. For more details on the two modes, see [1]. [1] https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#virtual-address-space-and-address-translation-mode Signed-off-by: Enze Li <lienze@kylinos.cn> Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
parent
ec9fee79d4
commit
8b5cb1cbf3
@ -84,7 +84,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
|
|||||||
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
|
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
|
||||||
|
|
||||||
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
|
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
|
||||||
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
|
|
||||||
|
/*
 * virt_to_page() dispatches on the translation mode of the kernel virtual
 * address: addresses below vm_map_base are in a direct-mapped window (DMW)
 * and convert arithmetically; addresses at or above it are page-table (TLB)
 * mapped and require a kernel page-table walk.
 *
 * NOTE: (kaddr) is parenthesized at every expansion so that an expression
 * argument such as `p + off` is cast as a whole, not just its first operand.
 */
#define virt_to_page(kaddr)								\
({											\
	(likely((unsigned long)(kaddr) < vm_map_base)) ?				\
	dmw_virt_to_page((unsigned long)(kaddr)) : tlb_virt_to_page((unsigned long)(kaddr));\
})
|
||||||
|
|
||||||
extern int __virt_addr_valid(volatile void *kaddr);
|
extern int __virt_addr_valid(volatile void *kaddr);
|
||||||
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
|
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
|
||||||
|
@ -353,6 +353,9 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
|
|||||||
extern pgd_t swapper_pg_dir[];
|
extern pgd_t swapper_pg_dir[];
|
||||||
extern pgd_t invalid_pg_dir[];
|
extern pgd_t invalid_pg_dir[];
|
||||||
|
|
||||||
|
struct page *dmw_virt_to_page(unsigned long kaddr);
|
||||||
|
struct page *tlb_virt_to_page(unsigned long kaddr);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The following only work if pte_present() is true.
|
* The following only work if pte_present() is true.
|
||||||
* Undefined behaviour if not..
|
* Undefined behaviour if not..
|
||||||
|
@ -9,6 +9,18 @@
|
|||||||
#include <asm/pgtable.h>
|
#include <asm/pgtable.h>
|
||||||
#include <asm/tlbflush.h>
|
#include <asm/tlbflush.h>
|
||||||
|
|
||||||
|
/*
 * Convert a DMW (direct-mapped window) kernel virtual address to its
 * struct page.  In DMW mode the physical address is recoverable by pure
 * arithmetic, so no page-table walk is needed.
 */
struct page *dmw_virt_to_page(unsigned long kaddr)
{
	unsigned long pfn = virt_to_pfn(kaddr);

	return pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(dmw_virt_to_page);
|
||||||
|
|
||||||
|
struct page *tlb_virt_to_page(unsigned long kaddr)
|
||||||
|
{
|
||||||
|
return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(tlb_virt_to_page);
|
||||||
|
|
||||||
pgd_t *pgd_alloc(struct mm_struct *mm)
|
pgd_t *pgd_alloc(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
pgd_t *ret, *init;
|
pgd_t *ret, *init;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user