// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
/*
 * Flush every local TLB entry tagged with @asid.
 *
 * Uses the "sfence.vma rs1, rs2" instruction with rs1 = x0 (all virtual
 * addresses) and rs2 = @asid, so only translations for that address-space
 * ID are invalidated on this hart. The "memory" clobber keeps the
 * compiler from reordering memory accesses across the fence.
 */
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	__asm__ __volatile__(" sfence.vma x0, %0 "
			     :
			     : "r" (asid)
			     : "memory");
}
/*
 * Flush the local TLB entry for virtual address @addr tagged with @asid.
 *
 * "sfence.vma rs1, rs2" with rs1 = @addr and rs2 = @asid invalidates only
 * the translation for that single page within the given address-space ID
 * on this hart. The "memory" clobber orders surrounding memory accesses
 * with respect to the fence.
 */
static inline void local_flush_tlb_page_asid(unsigned long addr,
					     unsigned long asid)
{
	__asm__ __volatile__(" sfence.vma %0, %1 "
			     :
			     : "r" (addr), "r" (asid)
			     : "memory");
}
2019-08-21 23:58:37 +09:00
void flush_tlb_all ( void )
{
sbi_remote_sfence_vma ( NULL , 0 , - 1 ) ;
}
2021-06-06 17:20:49 +02:00
static void __sbi_tlb_flush_range ( struct mm_struct * mm , unsigned long start ,
2021-04-30 16:28:49 +08:00
unsigned long size , unsigned long stride )
2019-08-21 23:58:37 +09:00
{
2021-06-06 17:20:49 +02:00
struct cpumask * cmask = mm_cpumask ( mm ) ;
2019-08-22 00:51:50 -07:00
unsigned int cpuid ;
2021-06-06 17:20:50 +02:00
bool broadcast ;
2019-08-21 23:58:37 +09:00
2019-08-22 00:51:49 -07:00
if ( cpumask_empty ( cmask ) )
return ;
2019-08-22 00:51:50 -07:00
cpuid = get_cpu ( ) ;
2021-06-06 17:20:50 +02:00
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but ( cmask , cpuid ) < nr_cpu_ids ;
if ( static_branch_unlikely ( & use_asid_allocator ) ) {
2023-03-13 11:49:06 +08:00
unsigned long asid = atomic_long_read ( & mm - > context . id ) & asid_mask ;
2019-08-22 00:51:50 -07:00
2021-06-06 17:20:50 +02:00
if ( broadcast ) {
2022-01-20 01:09:18 -08:00
sbi_remote_sfence_vma_asid ( cmask , start , size , asid ) ;
2021-06-06 17:20:50 +02:00
} else if ( size < = stride ) {
local_flush_tlb_page_asid ( start , asid ) ;
} else {
local_flush_tlb_all_asid ( asid ) ;
}
} else {
if ( broadcast ) {
2022-01-20 01:09:18 -08:00
sbi_remote_sfence_vma ( cmask , start , size ) ;
2021-06-06 17:20:50 +02:00
} else if ( size < = stride ) {
2019-08-22 00:51:51 -07:00
local_flush_tlb_page ( start ) ;
2021-06-06 17:20:50 +02:00
} else {
2019-08-22 00:51:51 -07:00
local_flush_tlb_all ( ) ;
2021-06-06 17:20:50 +02:00
}
2019-08-22 00:51:50 -07:00
}
put_cpu ( ) ;
2019-08-21 23:58:37 +09:00
}
void flush_tlb_mm ( struct mm_struct * mm )
{
2021-06-06 17:20:49 +02:00
__sbi_tlb_flush_range ( mm , 0 , - 1 , PAGE_SIZE ) ;
2019-08-21 23:58:37 +09:00
}
void flush_tlb_page ( struct vm_area_struct * vma , unsigned long addr )
{
2021-06-06 17:20:49 +02:00
__sbi_tlb_flush_range ( vma - > vm_mm , addr , PAGE_SIZE , PAGE_SIZE ) ;
2019-08-21 23:58:37 +09:00
}
void flush_tlb_range ( struct vm_area_struct * vma , unsigned long start ,
unsigned long end )
{
2021-06-06 17:20:49 +02:00
__sbi_tlb_flush_range ( vma - > vm_mm , start , end - start , PAGE_SIZE ) ;
2019-08-21 23:58:37 +09:00
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Flush TLB entries for the PMD-mapped (huge page) range [@start, @end)
 * of @vma's address space. The PMD_SIZE stride lets a single huge-page
 * mapping take the one-entry flush path in __sbi_tlb_flush_range().
 */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__sbi_tlb_flush_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif