/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;

#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
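
/*
 * current_mm is assumed to be updated by switch_mm() (see
 * asm/mmu_context.h) with the mm a CPU is about to run. reset_context(),
 * broadcast via IPI on an ASID rollover, reads it to decide which mm
 * needs a new ASID on this CPU.
 */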

/*
 * We fork()ed a process, and we need a new context for the child
 * to run in.  We reserve version 0 for initial tasks so we will
 * always allocate an ASID. The ASID 0 is reserved for the TTBR
 * register changing sequence.
 */
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.id = 0;
	spin_lock_init(&mm->context.id_lock);
}
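
/*
 * A sketch of the context.id layout assumed throughout this file (the
 * actual constants live in asm/mmu_context.h): the low ASID_BITS bits
 * hold the hardware ASID written to the context ID register, and the
 * remaining upper bits hold the generation ("version"). Two ids belong
 * to the same generation exactly when
 *
 *	(id1 ^ id2) >> ASID_BITS == 0
 *
 * which is the test used by set_mm_context() and __new_context() below.
 */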

static void flush_context(void)
{
	/* set the reserved ASID before flushing the TLB */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (0));
	isb();
	local_flush_tlb_all();
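	/*
	 * On a VIVT, ASID-tagged instruction cache the I-cache lines are
	 * keyed by ASID as well, so they must be invalidated before the
	 * ASID space is reused; the dsb() waits for that to complete.
	 */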
	if (icache_is_vivt_asid_tagged()) {
		__flush_icache_all();
		dsb();
	}
}

#ifdef CONFIG_SMP

static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	unsigned long flags;

	/*
	 * Locking needed for multi-threaded applications where the
	 * same mm->context.id could be set from different CPUs during
	 * the broadcast. This function is also called via IPI so the
	 * mm->context.id_lock has to be IRQ-safe.
	 */
	spin_lock_irqsave(&mm->context.id_lock, flags);
	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
		/*
		 * Old version of ASID found. Set the new one and
		 * reset mm_cpumask(mm).
		 */
		mm->context.id = asid;
		cpumask_clear(mm_cpumask(mm));
	}
	spin_unlock_irqrestore(&mm->context.id_lock, flags);

	/*
	 * Set the mm_cpumask(mm) bit for the current CPU.
	 */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}

/*
 * Reset the ASID on the current CPU. This function call is broadcast
 * from the CPU handling the ASID rollover and holding cpu_asid_lock.
 */
static void reset_context(void *info)
{
	unsigned int asid;
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = per_cpu(current_mm, cpu);

	/*
	 * Check if a current_mm was set on this CPU as it might still
	 * be in the early booting stages and using the reserved ASID.
	 */
	if (!mm)
		return;
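
	/*
	 * The read barrier is assumed to pair with the smp_wmb() issued in
	 * __new_context() before the IPI, so that the updated cpu_last_asid
	 * is visible before it is sampled below.
	 */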
	smp_rmb();
	asid = cpu_last_asid + cpu + 1;

	flush_context();
	set_mm_context(mm, asid);

	/* set the new ASID */
	asm("mcr	p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
	isb();
}

#else

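/*
 * On UP the ASID is only ever allocated from this CPU, so no cross-CPU
 * synchronisation of mm->context.id is needed: record the new id and
 * mark the mm as live on the (single) current CPU.
 */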
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
	mm->context.id = asid;
	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}

#endif

void __new_context(struct mm_struct *mm)
{
	unsigned int asid;

	spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
	/*
	 * Check the ASID again, in case the change was broadcast from
	 * another CPU before we acquired the lock.
	 */
	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		spin_unlock(&cpu_asid_lock);
		return;
	}
#endif
	/*
	 * At this point, it is guaranteed that the current mm (with
	 * an old ASID) isn't active on any other CPU since the ASIDs
	 * are changed simultaneously via IPI.
	 */

	asid = ++cpu_last_asid;
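	/*
	 * The 32-bit counter wrapped: restart at ASID_FIRST_VERSION so the
	 * reserved generation 0 (used by init tasks and by the TTBR
	 * changing sequence) is never handed out again.
	 */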
	if (asid == 0)
		asid = cpu_last_asid = ASID_FIRST_VERSION;

	/*
	 * If we've used up all our ASIDs, we need
	 * to start a new version and flush the TLB.
	 */
	if (unlikely((asid & ~ASID_MASK) == 0)) {
		asid = cpu_last_asid + smp_processor_id() + 1;
		flush_context();
#ifdef CONFIG_SMP
		smp_wmb();
		smp_call_function(reset_context, NULL, 1);
#endif
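		/*
		 * Each CPU in reset_context() takes cpu_last_asid + cpu + 1
		 * for its running mm, so step the counter past the whole
		 * block of per-CPU ASIDs that was just handed out.
		 */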
		cpu_last_asid += NR_CPUS;
	}

	set_mm_context(mm, asid);
	spin_unlock(&cpu_asid_lock);
}
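
/*
 * For context, a minimal sketch of how __new_context() is expected to be
 * driven from the switch_mm() path; the real check lives in
 * asm/mmu_context.h, so the snippet below is illustrative only. The
 * caller compares the mm's generation against cpu_last_asid and only
 * takes this slow path on a mismatch, e.g.
 *
 *	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 *		__new_context(mm);
 *
 * before writing mm->context.id into the context ID register.
 */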