/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
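
/*
 * Purely illustrative sketch, not part of the original file: the 64-bit
 * software ASID held in mm->context.id splits into a generation number in
 * the upper bits and a hardware ASID in the low ASID_BITS. ASID_BITS and
 * ASID_MASK are assumed to come from asm/mmu.h, with
 * ASID_MASK == (~0ULL) << ASID_BITS; the helper names are hypothetical.
 */
static inline u64 example_asid_generation(u64 id)
{
	/* Rollover generation, bumped by ASID_FIRST_VERSION on each rollover. */
	return id & ASID_MASK;
}

static inline u32 example_hw_asid(u64 id)
{
	/* Low bits actually programmed into the CONTEXTIDR ASID field. */
	return (u32)(id & ~ASID_MASK);
}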

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif
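
/*
 * Illustrative sketch, not part of the original file: with
 * CONFIG_PID_IN_CONTEXTIDR the notifier above leaves CONTEXTIDR holding the
 * task's PID in the PROCID field (bits 31..ASID_BITS) and the current
 * hardware ASID in the low ASID_BITS, i.e. roughly the composition below.
 * The helper name is hypothetical and only documents the register layout.
 */
static inline u32 example_contextidr(pid_t pid, u32 hw_asid)
{
	/* PROCID in the upper bits, hardware ASID in the low ASID_BITS. */
	return ((u32)pid << ASID_BITS) | (u32)(hw_asid & ~ASID_MASK);
}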

static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (is_reserved_asid(asid))
			return generation | (asid & ~ASID_MASK);

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			goto bump_gen;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;

bump_gen:
	asid |= generation;
	cpumask_clear(mm_cpumask(mm));
	return asid;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}
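
/*
 * Illustrative sketch, not part of the original file: the fast-path test in
 * check_and_switch_context(), !((asid ^ asid_generation) >> ASID_BITS), is a
 * compact way of checking that the generation field of the mm's ASID matches
 * the current global generation. Written out explicitly (helper name
 * hypothetical):
 */
static inline bool example_same_generation(u64 asid, u64 generation)
{
	/* Compare only the generation bits above ASID_BITS. */
	return (asid & ASID_MASK) == (generation & ASID_MASK);
}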