// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * With KFENCE enabled, kernel virtual addresses may be either DMW-mapped
 * or TLB-mapped.  virt_to_xyz() tells the two apart by comparing the
 * address with vm_map_base, and WANT_PAGE_VIRTUAL is defined in the
 * KFENCE case for the reverse xyz_to_virt() translations.
 */
struct page *dmw_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(__pa(kaddr));
}
EXPORT_SYMBOL(dmw_virt_to_page);

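/* TLB-mapped addresses (e.g. KFENCE objects) are resolved via a kernel page-table walk. */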
struct page *tlb_virt_to_page(unsigned long kaddr)
{
	return phys_to_page(pfn_to_phys(pte_pfn(*virt_to_kpte(kaddr))));
}
EXPORT_SYMBOL(tlb_virt_to_page);

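/*
 * Allocate a new PGD: pgd_init() fills every slot with the invalid
 * lower-level table, then the kernel part (above USER_PTRS_PER_PGD)
 * is copied from init_mm so kernel mappings are shared.
 */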
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *init, *ret = NULL;
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);

	if (ptdesc) {
		ret = (pgd_t *)ptdesc_address(ptdesc);
		init = pgd_offset(&init_mm, 0UL);
		pgd_init(ret);
		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(pgd_alloc);

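/*
 * Fill a PGD page with pointers to the invalid next-level table (PUD,
 * PMD or PTE depending on which levels are folded).  The loop below is
 * unrolled to write eight entries per iteration.
 */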
void pgd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long entry;

#if !defined(__PAGETABLE_PUD_FOLDED)
	entry = (unsigned long)invalid_pud_table;
#elif !defined(__PAGETABLE_PMD_FOLDED)
	entry = (unsigned long)invalid_pmd_table;
#else
	entry = (unsigned long)invalid_pte_table;
#endif

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PGD;

	do {
		p[0] = entry;
		p[1] = entry;
		p[2] = entry;
		p[3] = entry;
		p[4] = entry;
		p += 8;
		p[-3] = entry;
		p[-2] = entry;
		p[-1] = entry;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pgd_init);

#ifndef __PAGETABLE_PMD_FOLDED
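/* Fill a PMD page with pointers to the invalid PTE table. */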
void pmd_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pte_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PMD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pmd_init);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
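/* Fill a PUD page with pointers to the invalid PMD table. */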
void pud_init(void *addr)
{
	unsigned long *p, *end;
	unsigned long pagetable = (unsigned long)invalid_pmd_table;

	p = (unsigned long *)addr;
	end = p + PTRS_PER_PUD;

	do {
		p[0] = pagetable;
		p[1] = pagetable;
		p[2] = pagetable;
		p[3] = pagetable;
		p[4] = pagetable;
		p += 8;
		p[-3] = pagetable;
		p[-2] = pagetable;
		p[-1] = pagetable;
	} while (p != end);
}
EXPORT_SYMBOL_GPL(pud_init);
#endif

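/*
 * Build a PMD-level (huge page) entry from a page and protection bits;
 * set_pmd_at() below installs such an entry and then flushes the TLB.
 */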
pmd_t mk_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd;

	pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);

	return pmd;
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	flush_tlb_all();
}

void __init pagetable_init(void)
{
	/* Initialize the entire pgd. */
	pgd_init(swapper_pg_dir);
	pgd_init(invalid_pg_dir);

#ifndef __PAGETABLE_PUD_FOLDED
	pud_init(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_init(invalid_pmd_table);
#endif
}