// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common implementation of switch_mm_irqs_off
 *
 * Copyright IBM Corp. 2017
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/sched/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 32-bit keeps track of the current PGDIR in the thread struct */
	tsk->thread.pgdir = mm->pgd;
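
	/*
	 * (Hedged aside: caching copies like this in the thread struct
	 * presumably lets low-level exception entry and TLB-miss code
	 * reach them without dereferencing current->mm on hot paths.)
	 */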
#ifdef CONFIG_PPC_BOOK3S_32
	tsk->thread.sr0 = mm->context.sr0;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = mm->context.id;
#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = mm->pgd;
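
	/*
	 * (Hedged aside: the PACA is the per-CPU data area reachable via
	 * r13 even with translation off, so it is assumed this lets the
	 * Book3E TLB-miss handlers locate the PGD cheaply.)
	 */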
#ifdef CONFIG_PPC_KUAP
	tsk->thread.pid = mm->context.id;
#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
				   struct mm_struct *mm) { }
#endif
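
/*
 * Sketch of the expected calling pattern (an illustration, assuming the
 * generic powerpc switch_mm() wrapper is the caller; not code from this
 * file):
 *
 *	local_irq_save(flags);
 *	switch_mm_irqs_off(prev, next, tsk);
 *	local_irq_restore(flags);
 *
 * i.e. interrupts are already hard-disabled when the function below runs.
 */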

void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	bool new_on_cpu = false;

	/* Mark that this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
		inc_mm_active_cpus(next);

		/*
		 * This full barrier orders the store to the cpumask above vs
		 * a subsequent load which allows this CPU/MMU to begin
		 * loading translations for 'next' from the page table PTEs
		 * into the TLB.
		 *
		 * When using the radix MMU, that operation is the load of the
		 * MMU context id, which is then moved to SPRN_PID.
		 *
		 * For the hash MMU it is either the first load from slb_cache
		 * in switch_slb() to preload the SLBs, or the load of
		 * get_user_context which loads the context for the VSID hash
		 * to insert a new SLB, in the SLB fault handler.
		 *
		 * On the other side, the barrier is in mm/tlb-radix.c for
		 * radix which orders earlier stores to clear the PTEs before
		 * the load of mm_cpumask to check which CPU TLBs should be
		 * flushed. For hash, pte_xchg to clear the PTE includes the
		 * barrier.
		 *
		 * This full barrier is also needed by membarrier when
		 * switching between processes after store to rq->curr, before
		 * user-space memory accesses.
		 */
		smp_mb();

		new_on_cpu = true;
	}
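
	/*
	 * Illustration of the pairing described above (a sketch, not code
	 * from this file): the radix flush side effectively does
	 *
	 *	clear the PTEs;
	 *	smp_mb();
	 *	if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
	 *		flush that CPU's TLB;
	 *
	 * so a CPU either begins loading translations only after the PTEs
	 * are cleared, or is visible in mm_cpumask and has its TLB flushed.
	 */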

	/* Some subarchs need to track the PGD elsewhere */
	switch_mm_pgdir(tsk, next);

	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/*
	 * We must stop all altivec streams before changing the HW
	 * context
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile (PPC_DSSALL);
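
	/*
	 * (Hedged aside: PPC_DSSALL emits the "dssall" instruction, which
	 * terminates every data stream started with dst/dstst, so no stream
	 * keeps prefetching under the old context's translations.)
	 */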
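
	/*
	 * If this CPU was just added to the cpumask, the smp_mb() above
	 * already provides the full barrier that membarrier requires after
	 * the store to rq->curr, so the call below can be skipped.
	 */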
	if (!new_on_cpu)
		membarrier_arch_switch_mm(prev, next, tsk);

	/*
	 * The actual HW switching method differs between the various
	 * sub architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

#ifndef CONFIG_PPC_BOOK3S_64
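/*
 * (Hedged aside: pte_frag_get() returns any page-table fragment still
 * cached in mm->context; it is assumed Book3S 64 is excluded here because
 * it tears these down in its own context-management code.)
 */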
void arch_exit_mmap(struct mm_struct *mm)
{
	void *frag = pte_frag_get(&mm->context);

	if (frag)
		pte_frag_destroy(frag);
}
#endif