/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
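
/*
 * Worked example (a sketch, assuming ASID_BITS == 8 as on ARMv6, so
 * ~ASID_MASK == 0xff): ASID_FIRST_VERSION == 0x100 and NUM_USER_ASIDS
 * == 255. A context.id of 0x305 encodes generation 0x300 and hardware
 * ASID 0x05, tracked at bitmap index 4 (ASID_TO_IDX(0x305) == 4,
 * IDX_TO_ASID(4) == 0x05). The off-by-one keeps hardware ASID 0 free
 * for the reserved (global) mappings set by cpu_set_reserved_ttbr0().
 */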

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
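
/*
 * Per-CPU bookkeeping: active_asids holds the ASID currently in use on
 * each CPU (cleared to zero when a rollover harvests it), reserved_asids
 * preserves that ASID across a rollover, and tlb_flush_pending marks the
 * CPUs that must flush their TLB before running a new-generation ASID.
 */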
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
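
/*
 * Point TTBR0 at page tables containing only global mappings while the
 * ASID and page tables are being changed, so that speculative table
 * walks cannot pair the new ASID with the old page tables or vice versa.
 */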
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbl = __pa(swapper_pg_dir);
	unsigned long ttbh = 0;

	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	asm volatile(
	"	mcrr	p15, 0, %0, %1, c2		@ set TTBR0\n"
	:
	: "r" (ttbl), "r" (ttbh));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
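/*
 * With CONFIG_PID_IN_CONTEXTIDR, the PID of the incoming task is written
 * into the upper bits of CONTEXTIDR on every thread switch, so that
 * debuggers and trace logic can identify the running process.
 */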
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
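
/*
 * flush_context() is called from new_context() with cpu_asid_lock held
 * when the current generation has no free ASIDs left. It snapshots every
 * other CPU's active ASID into reserved_asids, rebuilds asid_map from
 * those snapshots, and queues the TLB invalidations that must complete
 * before any new-generation ASID is used.
 */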
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			__set_bit(ASID_TO_IDX(asid), asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes.
		 */
		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
		}
		__set_bit(asid, asid_map);
		asid = generation | IDX_TO_ASID(asid);
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}
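
/*
 * check_and_switch_context() is the context-switch entry point. The
 * fastpath installs mm with a single atomic64_xchg() when its ASID is
 * from the current generation; the slowpath takes cpu_asid_lock, obtains
 * a fresh ASID from new_context() and performs any local TLB and branch
 * predictor maintenance left pending by a rollover.
 */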
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}