/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork + exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;		/* non-zero when tearing down the whole mm */
	struct vm_area_struct	*vma;		/* vma currently being unmapped, if any */
	unsigned long		range_start;	/* pending TLB flush range start */
	unsigned long		range_end;	/* pending TLB flush range end (exclusive) */
	unsigned int		nr;		/* pages gathered so far */
	unsigned int		max;		/* capacity of the pages[] array */
	struct page		**pages;	/* pages to free after flushing the TLB */
	struct page		*local[MMU_GATHER_BUNDLE];	/* initial/fallback batch */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
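/*
 * Rough usage sketch (illustrative only - the real callers live in core mm
 * code such as mm/memory.c), using the helpers defined below:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	tlb_start_vma(&tlb, vma);
 *		...	for each pte/page being torn down:
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);
 *		...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb, start, end);
 */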
/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
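
/*
 * Accumulate [addr, addr + PAGE_SIZE) into the pending flush range.  For
 * example, starting from an empty range, removing ptes at 0x8000 and 0xa000
 * leaves range_start = 0x8000 and range_end = 0xb000, so tlb_flush() later
 * issues a single flush_tlb_range() covering both pages.
 */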
static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
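
/*
 * Try to replace the small local[] bundle embedded in the struct with a whole
 * page of struct page pointers.  If the GFP_NOWAIT allocation fails,
 * tlb->pages is left pointing at local[], so gathering still works but the
 * batch stays limited to MMU_GATHER_BUNDLE entries.
 */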
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	free_pages_and_swap_cache(tlb->pages, tlb->nr);
	tlb->nr = 0;
	if (tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}
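
/*
 * __tlb_remove_page() returns the number of slots still free in tlb->pages[];
 * a return of zero means the batch is full, and tlb_remove_page() then calls
 * tlb_flush_mmu() to flush the TLB and free the gathered pages before any
 * more can be queued.
 */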
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
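	/*
	 * Illustrative example (assuming 4K pages): if PMD_MASK rounds addr
	 * down to 0x00200000, the two calls below queue flushes at
	 * 0x002ff000 and 0x00300000, i.e. one address in each of the two
	 * 1MB sections.
	 */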
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}

static inline void
tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif