/*
 * linux/include/asm-arm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork + exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#ifndef CONFIG_MMU

#include <linux/pagemap.h>
#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */
#include <asm/pgalloc.h>

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;

	return tlb;
}
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (tlb->fullmm)
		flush_tlb_mm(tlb->mm);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}
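
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * a full-MM teardown such as exit_mmap() brackets the unmap with the
 * two functions above:
 *
 *	tlb = tlb_gather_mmu(mm, 1);	(full_mm_flush = 1)
 *	... unmap the page tables, handing each page to tlb_remove_page() ...
 *	tlb_finish_mmu(tlb, 0, end);
 *
 * Note that get_cpu_var() in tlb_gather_mmu() disables preemption and
 * the matching put_cpu_var() in tlb_finish_mmu() re-enables it, so the
 * caller must not sleep between the two calls.
 */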
#define tlb_remove_tlb_entry(tlb,ptep,address)	do { } while (0)

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		flush_tlb_range(vma, vma->vm_start, vma->vm_end);
}
#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
#define pte_free_tlb(tlb, ptep)	pte_free((tlb)->mm, ptep)
#define pmd_free_tlb(tlb, pmdp)	pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm)		do { } while (0)
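
/*
 * For the munmap case (fullmm = 0), the generic unmap path is expected
 * to drive the hooks above roughly as follows; the vma and range here
 * are hypothetical:
 *
 *	tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);	(flushes caches for the range)
 *	... clear ptes, pass each mapped page to tlb_remove_page(),
 *	    free emptied tables with pte_free_tlb()/pmd_free_tlb() ...
 *	tlb_end_vma(tlb, vma);		(flushes the TLB for the range)
 *	tlb_finish_mmu(tlb, start, end);
 *
 * With fullmm = 0, tlb_finish_mmu() performs no flush_tlb_mm(); the
 * flush_tlb_range() in tlb_end_vma() has already done the work.
 */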
#endif /* CONFIG_MMU */
#endif