2005-04-16 15:20:36 -07:00
/*
* linux / arch / arm / kernel / smp . c
*
* Copyright ( C ) 2002 ARM Limited , All Rights Reserved .
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation .
*/
2006-10-25 13:59:16 +01:00
# include <linux/module.h>
2005-04-16 15:20:36 -07:00
# include <linux/delay.h>
# include <linux/init.h>
# include <linux/spinlock.h>
# include <linux/sched.h>
# include <linux/interrupt.h>
# include <linux/cache.h>
# include <linux/profile.h>
# include <linux/errno.h>
# include <linux/mm.h>
2007-07-30 02:36:13 +04:00
# include <linux/err.h>
2005-04-16 15:20:36 -07:00
# include <linux/cpu.h>
# include <linux/smp.h>
# include <linux/seq_file.h>
2006-10-25 13:59:16 +01:00
# include <linux/irq.h>
2009-05-17 18:58:34 +01:00
# include <linux/percpu.h>
# include <linux/clockchips.h>
2005-04-16 15:20:36 -07:00
# include <asm/atomic.h>
# include <asm/cacheflush.h>
# include <asm/cpu.h>
2009-06-11 15:35:00 +01:00
# include <asm/cputype.h>
2005-06-18 09:33:31 +01:00
# include <asm/mmu_context.h>
# include <asm/pgtable.h>
# include <asm/pgalloc.h>
2005-04-16 15:20:36 -07:00
# include <asm/processor.h>
# include <asm/tlbflush.h>
# include <asm/ptrace.h>
2009-05-17 18:58:34 +01:00
# include <asm/localtimer.h>
2009-09-27 20:55:43 +01:00
# include <asm/smp_plat.h>
2005-04-16 15:20:36 -07:00
2005-06-18 09:33:31 +01:00
/*
 * As from 2.5, kernels no longer have an init_tasks structure, so we
 * need some other way of telling a new secondary core where to place
 * its SVC stack.  head.S reads this structure when a secondary starts.
 */
struct secondary_data secondary_data;
2005-04-16 15:20:36 -07:00
/*
 * Structures for inter-processor calls:
 * - a collection of single-bit IPI messages (see enum ipi_msg_type),
 *   guarded by a per-CPU spinlock since both senders (other CPUs) and
 *   the receiving interrupt handler touch ->bits.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;	/* number of IPIs handled on this CPU */
	unsigned long bits;		/* pending ipi_msg_type bit mask */
};

/*
 * Use __SPIN_LOCK_UNLOCKED() rather than the deprecated
 * SPIN_LOCK_UNLOCKED initialiser; the old form gives every
 * instance the same lockdep class key.
 */
static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= __SPIN_LOCK_UNLOCKED(ipi_data.lock),
};
/*
 * IPI message types; each occupies one bit in ipi_data.bits and is
 * dispatched by do_IPI() below.
 */
enum ipi_msg_type {
	IPI_TIMER,		/* deliver a timer tick (see ipi_timer()) */
	IPI_RESCHEDULE,		/* wake CPU; work happens on IRQ return path */
	IPI_CALL_FUNC,		/* generic_smp_call_function_interrupt() */
	IPI_CALL_FUNC_SINGLE,	/* generic_smp_call_function_single_interrupt() */
	IPI_CPU_STOP,		/* stop this CPU (ipi_cpu_stop()) */
};
2005-07-17 21:35:41 +01:00
/*
 * Boot secondary CPU @cpu and wait up to HZ jiffies (one second) for it
 * to mark itself online.
 *
 * Returns 0 on success, the fork_idle() error if the idle thread could
 * not be created, -EIO if the CPU started but never came online, or the
 * error from boot_secondary().
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	pmd_t *pmd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it.
	 * The idle thread is cached in cpu_data so a re-plugged CPU
	 * reuses it.
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	/*
	 * On a two-level page table pmd_offset() is a no-op, so index the
	 * newly allocated pgd by hand to reach the entry covering
	 * PHYS_OFFSET; pgd_offset() cannot be used here because it takes
	 * an mm_struct, not an unattached pgd.
	 */
	pmd = pmd_offset(pgd + pgd_index(PHYS_OFFSET), PHYS_OFFSET);
	/* Identity-map the first section of physical RAM, writable. */
	*pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) |
		     PMD_TYPE_SECT | PMD_SECT_AP_WRITE);
	flush_pmd_entry(pmd);
	/* Push the entry past any outer (L2) cache too; the new CPU is
	 * not yet cache-coherent with us. */
	outer_clean_range(__pa(pmd), __pa(pmd + 1));

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	/* Make secondary_data visible to the incoherent secondary. */
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	/* The secondary is (or will never be) up; the temporary stack
	 * pointer and page tables must not be reused. */
	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	/* Tear down the temporary 1:1 mapping and free the page tables. */
	*pmd = __pmd(0);
	clean_pmd_entry(pmd);
	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}
2005-11-02 22:24:33 +00:00
# ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Returns 0 on success, or the mach_cpu_disable() error if the platform
 * refuses to take this CPU offline.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	/* Let the platform veto offlining this CPU (e.g. the boot CPU). */
	ret = mach_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();
	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}
/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	/* Best effort: a failure is reported but not recovered from. */
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller.  This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 * Reset sp to the top of the idle thread's stack (minus the
	 * 8-byte frame slot) before jumping.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
# endif /* CONFIG_HOTPLUG_CPU */
2005-06-18 09:33:31 +01:00
/*
 * This is the secondary CPU boot entry.  We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
 *
 * The ordering below is significant: the mm must be live and the CPU
 * fully initialised before interrupts are enabled, and the CPU is only
 * marked online once the per-cpu timer and delay loop are set up.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	/* Switch away from the temporary 1:1 tables set up by __cpu_up(). */
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	/* Calibrate loops_per_jiffy, then publish it via cpu_data. */
	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}
2005-04-16 15:20:36 -07:00
/*
* Called by both boot and secondaries to move global data into
* per - processor storage .
*/
2005-07-17 21:35:41 +01:00
void __cpuinit smp_store_cpu_info ( unsigned int cpuid )
2005-04-16 15:20:36 -07:00
{
struct cpuinfo_arm * cpu_info = & per_cpu ( cpu_data , cpuid ) ;
cpu_info - > loops_per_jiffy = loops_per_jiffy ;
}
/*
 * All CPUs are up: report the total number of processors and their
 * combined BogoMIPS rating.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long total_bogo = 0;
	int i;

	for_each_online_cpu(i)
		total_bogo += per_cpu(cpu_data, i).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       total_bogo / (500000/HZ),
	       (total_bogo / (5000/HZ)) % 100);
}
/*
 * The boot CPU is already running its idle thread: just record it so
 * __cpu_up() style bookkeeping stays consistent.
 */
void __init smp_prepare_boot_cpu(void)
{
	per_cpu(cpu_data, smp_processor_id()).idle = current;
}
2009-05-17 16:20:18 +01:00
/*
 * Set @msg pending for every CPU in @mask, then trigger the platform's
 * cross-CPU interrupt.  IRQs are disabled across the update because
 * do_IPI() takes the same per-cpu ipi->lock from interrupt context and
 * @mask may include the calling CPU.
 */
static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}
2009-05-17 16:20:18 +01:00
/* Arch hook for the generic smp_call_function_many() machinery. */
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}
2008-06-10 20:48:30 +02:00
/* Arch hook for the generic smp_call_function_single() machinery. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}
2005-04-16 15:20:36 -07:00
void show_ipi_list ( struct seq_file * p )
{
unsigned int cpu ;
seq_puts ( p , " IPI: " ) ;
2005-07-11 19:26:31 +01:00
for_each_present_cpu ( cpu )
2005-04-16 15:20:36 -07:00
seq_printf ( p , " %10lu " , per_cpu ( ipi_data , cpu ) . ipi_count ) ;
seq_putc ( p , ' \n ' ) ;
}
2005-11-08 19:08:05 +00:00
/* Print the per-CPU local timer interrupt counters. */
void show_local_irqs(struct seq_file *p)
{
	unsigned int i;

	seq_puts(p, "LOC: ");

	for_each_present_cpu(i)
		seq_printf(p, "%10u ", irq_stat[i].local_timer_irqs);

	seq_putc(p, '\n');
}
2009-05-17 18:58:34 +01:00
/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

/*
 * Deliver one timer tick to this CPU's clockevent handler.  Called
 * either from do_local_timer() or on receipt of an IPI_TIMER message.
 */
static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);

	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
2005-11-08 19:08:05 +00:00
# ifdef CONFIG_LOCAL_TIMERS
2007-05-08 11:31:07 +01:00
asmlinkage void __exception do_local_timer ( struct pt_regs * regs )
2005-11-08 19:08:05 +00:00
{
2006-10-25 13:59:16 +01:00
struct pt_regs * old_regs = set_irq_regs ( regs ) ;
2005-11-08 19:08:05 +00:00
int cpu = smp_processor_id ( ) ;
if ( local_timer_ack ( ) ) {
irq_stat [ cpu ] . local_timer_irqs + + ;
2006-10-25 13:59:16 +01:00
ipi_timer ( ) ;
2005-11-08 19:08:05 +00:00
}
2006-10-25 13:59:16 +01:00
set_irq_regs ( old_regs ) ;
2005-11-08 19:08:05 +00:00
}
# endif
2009-05-17 18:58:34 +01:00
# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast ( const struct cpumask * mask )
{
send_ipi_message ( mask , IPI_TIMER ) ;
}
static void broadcast_timer_set_mode ( enum clock_event_mode mode ,
struct clock_event_device * evt )
{
}
static void local_timer_setup ( struct clock_event_device * evt )
{
evt - > name = " dummy_timer " ;
evt - > features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_DUMMY ;
evt - > rating = 400 ;
evt - > mult = 1 ;
evt - > set_mode = broadcast_timer_set_mode ;
evt - > broadcast = smp_timer_broadcast ;
clockevents_register_device ( evt ) ;
}
# endif
/* Set up and register the per-CPU clockevent for the calling CPU. */
void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);

	local_timer_setup(evt);
}
2005-04-16 15:20:36 -07:00
/* Serialises the "stopping" diagnostics between CPUs. */
static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 *
 * Marks the CPU offline, disables interrupts, and spins forever:
 * this function never returns.
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}
/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		/* Atomically snapshot and clear the pending message bits;
		 * new bits may arrive while we process, hence the loop. */
		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/* Isolate the lowest set bit, drop it from msgs,
			 * and convert it to its bit number. */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}
/* Kick @cpu; rescheduling happens on its interrupt return path. */
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}
/* Stop every other online CPU (they spin in ipi_cpu_stop()). */
void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;

	/* Do not send the stop message to ourselves. */
	cpu_clear(smp_processor_id(), mask);
	send_ipi_message(&mask, IPI_CPU_STOP);
}
/*
 * Profiling timer multipliers are not supported on this architecture.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
2005-06-28 13:49:16 +01:00
2009-05-17 16:20:18 +01:00
/*
 * Run @func(@info) on every CPU in @mask; the calling CPU (if it is in
 * the mask) runs it directly.  Preemption is disabled so the CPU test
 * and the direct call refer to the same processor.
 */
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}
/**********************************************************************/

/*
 * TLB operations
 */

/* Argument bundle passed to the ipi_flush_tlb_*() helpers below. */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

/*
 * Each helper runs on a target CPU (via on_each_cpu()/on_each_cpu_mask())
 * and applies the local form of the requested TLB flush.
 */

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
/*
 * Flush the whole TLB on every CPU when the hardware does not
 * broadcast TLB operations; otherwise a local flush suffices.
 */
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}
/*
 * Flush @mm's TLB entries, broadcasting only to the CPUs in the
 * mm's cpumask when hardware broadcast is unavailable.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}
/*
 * Flush the TLB entry for user address @uaddr in @vma on every CPU
 * the owning mm has run on, if hardware broadcast is unavailable.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}
/*
 * Flush the TLB entry for kernel address @kaddr; kernel mappings are
 * global, so the IPI form must go to every CPU.
 */
void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}
/*
 * Flush the user address range [@start, @end) of @vma, broadcasting
 * to the owning mm's CPUs when hardware broadcast is unavailable.
 */
void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}
/*
 * Flush the kernel address range [@start, @end); kernel mappings are
 * global, so the IPI form must go to every CPU.
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}