// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include <mm/mmu_decl.h>
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
	},
	[MMU_PAGE_512K] = {
		.shift	= 19,
	},
	[MMU_PAGE_8M] = {
		.shift	= 23,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_FSL_BOOKE */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
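/*
 * Only the counter itself is defined here.  As a sketch of the intended
 * usage (not enforced by this file): consumers such as the FSL Book3E
 * huge page preload path read the per-CPU value, use it as the TLBCAM
 * entry to overwrite, and advance it so successive preloads cycle through
 * the available entries.
 */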
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
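/*
 * Illustrative call flow (a sketch, not code from this file): a generic
 * mm path such as ptep_clear_flush() clears the PTE and then calls
 * flush_tlb_page() so that any stale translation of the user address is
 * dropped, roughly:
 *
 *	pte = ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * Kernel mappings, which have no mm/PID attached, go through
 * flush_tlb_kernel_range() instead.
 */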
#ifndef CONFIG_PPC_8xx
/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
#endif

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
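/*
 * flush_tlb_mm() invalidates every TLB entry tagged with this mm's PID:
 * unless the mm is local to this core, an IPI runs _tlbil_pid() on the
 * other CPUs in mm_cpumask(); the local TLB is flushed in either case.
 */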
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
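/*
 * __flush_tlb_page() invalidates a single (possibly indirect) TLB entry
 * for a user address.  When the hardware supports broadcast tlbivax, that
 * instruction (optionally serialized by tlbivax_lock) reaches all CPUs at
 * once; otherwise an IPI runs _tlbil_va() on the other CPUs and the entry
 * is then invalidated locally as well.
 */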
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);

			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};

			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();

	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
#ifndef CONFIG_PPC_8xx
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
#endif

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */
#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
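/*
 * Rough sketch of the no-tablewalk case handled below (an illustration,
 * not a spec): the TLB miss handlers load PTEs through a "virtual linear
 * page table" window, where the PTE for an address sits at offset
 * (address >> PAGE_SHIFT) * sizeof(pte_t) within the window selected by
 * the region bits.  For example, with PAGE_SHIFT == 12, address
 * 0x12345000 gives (0x12345000 >> 9) & ~0xffful == 0x91000, which is then
 * or'ed with the region id (0x1000000000000000 for region 0); that virtual
 * PTE address is what gets flushed.
 */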
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * while preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
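/*
 * setup_page_sizes() probes the MMU configuration registers (MMUCFG,
 * TLBnCFG, TLBnPS, EPTCFG) and marks, in mmu_psize_defs[], which of the
 * software-listed page sizes the hardware actually supports as direct
 * and/or indirect entries, selecting book3e_htw_mode along the way.
 * Sizes left without any flag have their .shift cleared so later code
 * treats them as unsupported.
 */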
static void __init setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
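/*
 * The summary printed above typically looks like the following on the
 * console (sizes are illustrative; the actual list depends on the part):
 *
 *   MMU: Supported page sizes
 *          4 KB as direct & indirect
 *      16384 KB as direct & indirect
 *     262144 KB as direct
 *    1048576 KB as direct
 */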
static void __init setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false, true);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}
static void __init early_init_mmu_global(void)
{
	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

	ioremap_bot = IOREMAP_BASE;
}
static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}
/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true, true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */