// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
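
/*
 * Flush every TLB entry belonging to @asid on the local hart: sfence.vma
 * with rs1 == x0 covers all addresses, rs2 selects the address space.
 * With no ASID in play (FLUSH_TLB_NO_ASID), fall back to a full local
 * flush.
 */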
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				:
				: "r" (asid)
				: "memory");
	else
		local_flush_tlb_all();
}
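
/*
 * Flush the local TLB entries covering the page at @addr in address
 * space @asid. Without an ASID, flush the page for all address spaces.
 */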
static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				:
				: "r" (addr), "r" (asid)
				: "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;
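
/*
 * Flush a range one stride at a time, unless the range spans more PTEs
 * than tlb_flush_all_threshold: past that point a single full flush of
 * the ASID is expected to be cheaper than a long run of sfence.vma
 * instructions.
 */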
static void local_flush_tlb_range_threshold_asid(unsigned long start,
						 unsigned long size,
						 unsigned long stride,
						 unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}
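
/*
 * Pick the cheapest local flush for the range: one page when the range
 * fits in a single stride, everything under @asid when the caller asks
 * for the whole address space (FLUSH_TLB_MAX_SIZE), the threshold-based
 * loop otherwise.
 */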
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}
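
/* IPI callback: each hart flushes its own TLB. */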
static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}
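
/*
 * Flush the TLB on all online harts, either by IPI so that every hart
 * fences itself, or by handing the remote fence off to the SBI firmware.
 */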
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE,
					   FLUSH_TLB_NO_ASID);
}
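
/*
 * on_each_cpu_mask() only forwards a single pointer to the callback, so
 * the range parameters are marshalled through this struct for the IPI
 * path.
 */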
struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};
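
/* IPI callback: unpack the arguments and flush the range locally. */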
static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
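
/*
 * Common range-flush path. With an mm, limit the flush to the harts in
 * mm_cpumask() and, when the ASID allocator is active, to the mm's ASID;
 * get_cpu()/put_cpu() pin the caller so the "does any other CPU need this
 * flush?" test cannot be invalidated by a migration. With no mm (kernel
 * mappings), broadcast to every online hart without an ASID.
 */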
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
			      unsigned long size, unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	const struct cpumask *cmask;
	unsigned long asid = FLUSH_TLB_NO_ASID;
	bool broadcast;

	if (mm) {
		unsigned int cpuid;

		cmask = mm_cpumask(mm);
		if (cpumask_empty(cmask))
			return;

		cpuid = get_cpu();
		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;

		if (static_branch_unlikely(&use_asid_allocator))
			asid = atomic_long_read(&mm->context.id) & asid_mask;
	} else {
		cmask = cpu_online_mask;
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask,
					 __ipi_flush_tlb_range_asid,
					 &ftd, 1);
		} else
			sbi_remote_sfence_vma_asid(cmask,
						   start, size, asid);
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (mm)
		put_cpu();
}
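
/*
 * Flush all user mappings of @mm: FLUSH_TLB_MAX_SIZE makes the local
 * helpers collapse this into one full flush of the mm's ASID.
 */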
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
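
/* Flush an explicit [start, end) range of @mm with @page_size as the stride. */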
void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm, start, end - start, page_size);
}
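
/* Flush the single page mapped at @addr in @vma's address space. */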
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
}
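
/*
 * Flush a user range, widening the stride to the huge page size for
 * hugetlb VMAs so that one sfence.vma can stand in for each huge
 * mapping; the NAPOT adjustment below narrows the stride again where
 * every constituent PTE must be invalidated.
 */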
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in that
		 * case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
}
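
/* Kernel mappings carry no mm or ASID, so broadcast to all online harts. */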
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
}
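
/* THP: flush PMD-level mappings with a PMD_SIZE stride. */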
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
}
#endif