// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];  /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of threads (siblings) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

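/*
 * Boot handshake with __cpu_up(): a starting secondary completes
 * cpu_starting once its early setup is done, and cpu_running once it
 * has marked itself online; the boot cpu waits on each in turn.
 */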
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}

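/*
 * Note: the IOCSR mailbox interface carries at most 32 data bits per
 * write, so a 64-bit payload is delivered as two writes, one addressed
 * to the HI half and one to the LO half of the target cpu's mailbox;
 * the BLOCKING flag asks the hardware to hold the write until the send
 * buffer can accept it.
 */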
/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	smp_mb();

	return action;
}

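/*
 * Each set bit in @action becomes a separate write to IPI_SEND: the low
 * bits of the request encode the interrupt number (ffs(action) - 1) and
 * IOCSR_IPI_SEND_CPU_SHIFT positions the physical id of the target cpu.
 */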
static void ipi_write_action(int cpu, u32 action)
{
	unsigned int irq = 0;

	while ((irq = ffs(action))) {
		uint32_t val = IOCSR_IPI_SEND_BLOCKING;

		val |= (irq - 1);
		val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
		iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
		action &= ~BIT(irq - 1);
	}
}

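/* IPIs are addressed by physical cpu id, hence the cpu_logical_map() lookups below. */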
void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);

irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

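/*
 * Enumerate cpus from the device tree: logical cpu 0 is reserved for
 * the boot cpu, every other usable hwid gets the next free slot in
 * cpu_present_mask, and both direction maps (__cpu_number_map and
 * __cpu_logical_map) are filled in accordingly.
 */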
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
	unsigned int cpu, cpuid;
	struct device_node *node = NULL;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpuid = of_get_cpu_hwid(node, 0);
		if (cpuid >= nr_cpu_ids)
			continue;

		if (cpuid == loongson_sysconf.boot_cpu_id) {
			cpu = 0;
			numa_add_cpu(cpu);
		} else {
			cpu = cpumask_next_zero(-1, cpu_present_mask);
		}

		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	}

	loongson_sysconf.nr_cpus = num_processors;
#endif
}

void __init loongson_smp_setup(void)
{
	fdt_smp_setup();

	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

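/*
 * Mark every configured cpu present and zero its mailbox 0, so that a
 * parked secondary (e.g. one spinning in play_dead() after hot-unplug)
 * keeps waiting until loongson_boot_secondary() posts a real entry
 * address there.
 */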
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}

/*
* SMP init and finish on secondary CPUs
*/
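/*
 * Unmask the per-cpu interrupt sources (HW lines IP0-IP2, IPI, PMC and
 * the constant timer) in ECFG and re-enable all 32 IPI bits before this
 * cpu goes fully online.
 */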
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].core =
		     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
	cpu_data[cpu].package =
		     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
}

void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}

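/*
 * An offlined cpu advertises CPU_DEAD, then sleeps via "idle 0" until
 * an IPI wakes it and mailbox 0 holds a non-zero entry address; jumping
 * to that address brings the core back for a later cpu-up.
 */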
void play_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	unreachable();
}

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume         = loongson_ipi_resume,
	.suspend        = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings <= 1)
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
	else {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	}
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

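/*
 * cpu_foreign_map[cpu] ends up containing exactly one representative of
 * every online core other than cpu's own: the first pass below keeps a
 * single sibling per core, the second subtracts cpu's own sibling set.
 */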
/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return a valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in a round-robin way.
		 * Once hotplugged, a new correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in loongson_smp_finish(); enabling it too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

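/*
 * If the mm is live only on the local cpu, skip the cross-call: zeroing
 * cpu_context() for the other cpus forces them to allocate a fresh ASID
 * the next time they run this mm, so their stale entries die lazily.
 */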
void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}

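/*
 * The cross-call helpers receive a single void *, so the ranged flushes
 * below bundle their arguments in this struct; an on-stack instance is
 * safe because on_each_cpu_mask() is called with wait=1.
 */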
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long)info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);