/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier
 */
# include <linux/mm.h>
# include <asm/processor.h>
# include <asm/mmu_context.h>
# include <asm/tlbflush.h>
# include <asm/cacheflush.h>
static inline void __flush_itlb_all ( void )
{
2006-12-10 13:18:48 +03:00
int w , i ;
2005-06-24 09:01:24 +04:00
2006-12-10 13:18:48 +03:00
for ( w = 0 ; w < ITLB_ARF_WAYS ; w + + ) {
for ( i = 0 ; i < ( 1 < < XCHAL_ITLB_ARF_ENTRIES_LOG2 ) ; i + + ) {
int e = w + ( i < < PAGE_SHIFT ) ;
invalidate_itlb_entry_no_isync ( e ) ;
2005-06-24 09:01:24 +04:00
}
}
asm volatile ( " isync \n " ) ;
}
static inline void __flush_dtlb_all ( void )
{
2006-12-10 13:18:48 +03:00
int w , i ;
2005-06-24 09:01:24 +04:00
2006-12-10 13:18:48 +03:00
for ( w = 0 ; w < DTLB_ARF_WAYS ; w + + ) {
for ( i = 0 ; i < ( 1 < < XCHAL_DTLB_ARF_ENTRIES_LOG2 ) ; i + + ) {
int e = w + ( i < < PAGE_SHIFT ) ;
invalidate_dtlb_entry_no_isync ( e ) ;
2005-06-24 09:01:24 +04:00
}
}
asm volatile ( " isync \n " ) ;
}
/* Flush both the instruction and data TLBs on the current CPU. */
void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}
/* If mm is current, we simply assign the current task a new ASID, thus,
 * invalidating all previous tlb entries. If mm is someone else's user mapping,
 * we invalidate the context, thus, when that user mapping is swapped in,
 * a new context will be assigned to it.
 */
2013-10-17 02:42:26 +04:00
void local_flush_tlb_mm ( struct mm_struct * mm )
2005-06-24 09:01:24 +04:00
{
2013-10-17 02:42:26 +04:00
int cpu = smp_processor_id ( ) ;
2005-06-24 09:01:24 +04:00
if ( mm = = current - > active_mm ) {
2012-11-05 07:44:03 +04:00
unsigned long flags ;
2013-05-15 19:02:06 +04:00
local_irq_save ( flags ) ;
2013-10-17 02:42:26 +04:00
mm - > context . asid [ cpu ] = NO_CONTEXT ;
activate_context ( mm , cpu ) ;
2005-06-24 09:01:24 +04:00
local_irq_restore ( flags ) ;
2013-10-17 02:42:26 +04:00
} else {
mm - > context . asid [ cpu ] = NO_CONTEXT ;
mm - > context . cpu = - 1 ;
2005-06-24 09:01:24 +04:00
}
}
/*
 * _TLB_ENTRIES is the larger of the ITLB and DTLB auto-refill entry
 * counts; local_flush_tlb_range() uses it to decide between per-page
 * invalidation and a full context flush.
 */
#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
2013-10-17 02:42:26 +04:00
void local_flush_tlb_range ( struct vm_area_struct * vma ,
unsigned long start , unsigned long end )
2005-06-24 09:01:24 +04:00
{
2013-10-17 02:42:26 +04:00
int cpu = smp_processor_id ( ) ;
2005-06-24 09:01:24 +04:00
struct mm_struct * mm = vma - > vm_mm ;
unsigned long flags ;
2013-10-17 02:42:26 +04:00
if ( mm - > context . asid [ cpu ] = = NO_CONTEXT )
2005-06-24 09:01:24 +04:00
return ;
#if 0
printk ( " [tlbrange<%02lx,%08lx,%08lx>] \n " ,
2013-10-17 02:42:26 +04:00
( unsigned long ) mm - > context . asid [ cpu ] , start , end ) ;
2005-06-24 09:01:24 +04:00
# endif
2013-05-15 19:02:06 +04:00
local_irq_save ( flags ) ;
2005-06-24 09:01:24 +04:00
2006-12-10 13:18:48 +03:00
if ( end - start + ( PAGE_SIZE - 1 ) < = _TLB_ENTRIES < < PAGE_SHIFT ) {
2005-06-24 09:01:24 +04:00
int oldpid = get_rasid_register ( ) ;
2013-10-17 02:42:26 +04:00
set_rasid_register ( ASID_INSERT ( mm - > context . asid [ cpu ] ) ) ;
2005-06-24 09:01:24 +04:00
start & = PAGE_MASK ;
2012-11-29 04:53:51 +04:00
if ( vma - > vm_flags & VM_EXEC )
2005-06-24 09:01:24 +04:00
while ( start < end ) {
invalidate_itlb_mapping ( start ) ;
invalidate_dtlb_mapping ( start ) ;
start + = PAGE_SIZE ;
}
else
while ( start < end ) {
invalidate_dtlb_mapping ( start ) ;
start + = PAGE_SIZE ;
}
set_rasid_register ( oldpid ) ;
} else {
2013-10-17 02:42:26 +04:00
local_flush_tlb_mm ( mm ) ;
2005-06-24 09:01:24 +04:00
}
local_irq_restore ( flags ) ;
}
2013-10-17 02:42:26 +04:00
void local_flush_tlb_page ( struct vm_area_struct * vma , unsigned long page )
2005-06-24 09:01:24 +04:00
{
2013-10-17 02:42:26 +04:00
int cpu = smp_processor_id ( ) ;
2005-06-24 09:01:24 +04:00
struct mm_struct * mm = vma - > vm_mm ;
unsigned long flags ;
int oldpid ;
2013-10-17 02:42:26 +04:00
if ( mm - > context . asid [ cpu ] = = NO_CONTEXT )
2005-06-24 09:01:24 +04:00
return ;
2013-05-15 19:02:06 +04:00
local_irq_save ( flags ) ;
2005-06-24 09:01:24 +04:00
2012-11-29 04:53:51 +04:00
oldpid = get_rasid_register ( ) ;
2013-10-17 02:42:26 +04:00
set_rasid_register ( ASID_INSERT ( mm - > context . asid [ cpu ] ) ) ;
2005-06-24 09:01:24 +04:00
if ( vma - > vm_flags & VM_EXEC )
invalidate_itlb_mapping ( page ) ;
invalidate_dtlb_mapping ( page ) ;
set_rasid_register ( oldpid ) ;
local_irq_restore ( flags ) ;
}
#ifdef CONFIG_DEBUG_TLB_SANITY

/*
 * Walk the current task's page tables and return the raw PTE value
 * mapping vaddr, or 0 if any level of the walk is absent or bad.
 * Falls back to active_mm for kernel threads (task->mm == NULL).
 */
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *curr = get_current();
	struct mm_struct *mm = curr->mm ? curr->mm : curr->active_mm;
	pgd_t *pgd = pgd_offset(mm, vaddr);
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none_or_clear_bad(pgd))
		return 0;
	pmd = pmd_offset(pgd, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	return pte_val(*pte);
}
enum {
TLB_SUSPICIOUS = 1 ,
TLB_INSANE = 2 ,
} ;
static void tlb_insane ( void )
{
BUG_ON ( 1 ) ;
}
static void tlb_suspicious ( void )
{
WARN_ON ( 1 ) ;
}
/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or the
 * PTE is marked as non-present. A non-present PTE together with a page of
 * non-zero refcount and zero mapcount is normal for a batched TLB flush
 * operation. Zero refcount means that the page was freed prematurely.
 * Non-zero mapcount is unusual, but does not necessarily mean an error,
 * and is therefore only marked as suspicious.
 */
/*
 * Validate one auto-refill TLB entry (way w, entry e, D- or I-TLB per
 * dtlb) against the page tables.  Returns a bitmask of TLB_INSANE /
 * TLB_SUSPICIOUS, or 0 if the entry looks consistent.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	/* TLB probe index: way in the low bits, entry above PAGE_SHIFT. */
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	/* Reconstruct the full VPN: page frame from r0, set index from e. */
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	/* Kernel-ASID entries must map kernel addresses and user-ASID
	 * entries user addresses (ASID 0 entries are invalid: skip). */
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	/* PA cross-check is only meaningful for the current context. */
	if (tlb_asid == mm_asid) {
		unsigned r1 = dtlb ? read_dtlb_translation(tlbidx) :
			read_itlb_translation(tlbidx);
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				/* Freed page still in the TLB: fatal.
				 * Still-mapped page: merely suspicious
				 * (may be a batched flush in flight). */
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapped(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				/* Present PTE disagreeing with the TLB. */
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
void check_tlb_sanity ( void )
{
unsigned long flags ;
unsigned w , e ;
int bug = 0 ;
local_irq_save ( flags ) ;
for ( w = 0 ; w < DTLB_ARF_WAYS ; + + w )
for ( e = 0 ; e < ( 1 < < XCHAL_DTLB_ARF_ENTRIES_LOG2 ) ; + + e )
bug | = check_tlb_entry ( w , e , true ) ;
for ( w = 0 ; w < ITLB_ARF_WAYS ; + + w )
for ( e = 0 ; e < ( 1 < < XCHAL_ITLB_ARF_ENTRIES_LOG2 ) ; + + e )
bug | = check_tlb_entry ( w , e , false ) ;
if ( bug & TLB_INSANE )
tlb_insane ( ) ;
if ( bug & TLB_SUSPICIOUS )
tlb_suspicious ( ) ;
local_irq_restore ( flags ) ;
}
# endif /* CONFIG_DEBUG_TLB_SANITY */