/*
 *  arch/arm/include/asm/tlbflush.h
 *
 *  Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
#ifndef CONFIG_MMU

#define tlb_flush(tlb)	((void) tlb)
#else /* CONFIG_MMU */
#include <asm/glue.h>
#define TLB_V3_PAGE	(1 << 0)
#define TLB_V4_U_PAGE	(1 << 1)
#define TLB_V4_D_PAGE	(1 << 2)
#define TLB_V4_I_PAGE	(1 << 3)
#define TLB_V6_U_PAGE	(1 << 4)
#define TLB_V6_D_PAGE	(1 << 5)
#define TLB_V6_I_PAGE	(1 << 6)

#define TLB_V3_FULL	(1 << 8)
#define TLB_V4_U_FULL	(1 << 9)
#define TLB_V4_D_FULL	(1 << 10)
#define TLB_V4_I_FULL	(1 << 11)
#define TLB_V6_U_FULL	(1 << 12)
#define TLB_V6_D_FULL	(1 << 13)
#define TLB_V6_I_FULL	(1 << 14)

#define TLB_V6_U_ASID	(1 << 16)
#define TLB_V6_D_ASID	(1 << 17)
#define TLB_V6_I_ASID	(1 << 18)
#define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
#define TLB_DCLEAN	(1 << 30)
#define TLB_WB		(1 << 31)
/*
 *	MMU TLB Model
 *	=============
 *
 *	We have the following to choose from:
 *	  v3    - ARMv3
 *	  v4    - ARMv4 without write buffer
 *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
 *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
 *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 *	  v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB
#define v3_tlb_flags	(TLB_V3_FULL | TLB_V3_PAGE)

#ifdef CONFIG_CPU_TLB_V3
# define v3_possible_flags	v3_tlb_flags
# define v3_always_flags	v3_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v3
# endif
#else
# define v3_possible_flags	0
# define v3_always_flags	(-1UL)
#endif
#define v4_tlb_flags	(TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags	v4_tlb_flags
# define v4_always_flags	v4_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4
# endif
#else
# define v4_possible_flags	0
# define v4_always_flags	(-1UL)
#endif
#define v4wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags	v4wbi_tlb_flags
# define v4wbi_always_flags	v4wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags	0
# define v4wbi_always_flags	(-1UL)
#endif
#define fr_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags	fr_tlb_flags
# define fr_always_flags	fr_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wbi
# endif
#else
# define fr_possible_flags	0
# define fr_always_flags	(-1UL)
#endif
#define v4wb_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V4_I_FULL | TLB_V4_D_FULL | \
			 TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags	v4wb_tlb_flags
# define v4wb_always_flags	v4wb_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v4wb
# endif
#else
# define v4wb_possible_flags	0
# define v4wb_always_flags	(-1UL)
#endif
#define v6wbi_tlb_flags	(TLB_WB | TLB_DCLEAN | \
			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
			 TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags	v6wbi_tlb_flags
# define v6wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags	0
# define v6wbi_always_flags	(-1UL)
#endif
#ifdef CONFIG_CPU_TLB_V7
# define v7wbi_possible_flags	v6wbi_tlb_flags
# define v7wbi_always_flags	v6wbi_tlb_flags
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags	0
# define v7wbi_always_flags	(-1UL)
#endif
#ifndef _TLB
#error Unknown TLB model
#endif
#ifndef __ASSEMBLY__
#include <linux/sched.h>
struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};
/*
* Select the calling method
*/
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags		cpu_tlb.tlb_flags
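
/*
 * Worked example (illustrative): in a kernel built for a single TLB
 * model, say _TLB == v4wb, __cpu_flush_kern_tlb_range expands via
 * __glue() to the direct symbol v4wb_flush_kern_tlb_range, implemented
 * in arch/arm/mm/tlb-v4wb.S.  With MULTI_TLB defined, the same call
 * becomes an indirect call through cpu_tlb, which the processor setup
 * code fills in at boot for the detected CPU.
 */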
/*
 *	TLB Management
 *	==============
 *
 *	The arch/arm/mm/tlb-*.S files implement these methods.
 *
 *	The TLB specific code is expected to perform whatever tests it
 *	needs to determine if it should invalidate the TLB for each
 *	call.  Start addresses are inclusive and end addresses are
 *	exclusive; it is safe to round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address
 *		space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(mm,start,end)
 *
 *		Invalidate a range of TLB entries in the specified
 *		address space.
 *		- mm	- mm_struct describing address space
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vaddr,vma)
 *
 *		Invalidate the specified page in the specified address range.
 *		- vaddr	- virtual address (may not be aligned)
 *		- vma	- vma_struct describing address range
 *
 *	flush_kern_tlb_page(kaddr)
 *
 *		Invalidate the TLB entry for the specified page.  The address
 *		will be in the kernel's virtual memory space.  Current uses
 *		only require the D-TLB to be invalidated.
 *		- kaddr	- Kernel virtual memory address
 */
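
/*
 * Hedged usage sketch (not part of the original documentation): a
 * caller that has just modified a single user PTE would typically do
 *
 *	flush_tlb_page(vma, uaddr);
 *
 * whereas tearing down an entire address space uses
 *
 *	flush_tlb_mm(mm);
 *
 * and kernel mapping changes use the flush_tlb_kernel_* variants.
 */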
/*
 * We optimise the code below by:
 *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 *  - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal given the compiler constraints for the multi-CPU
 * case.  We could do better for the multi-CPU case if the compiler
 * implemented the "%?" method, but this has been discontinued due to too
 * many people getting it wrong.
 */
#define possible_tlb_flags	(v3_possible_flags | \
				 v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)
#define always_tlb_flags	(v3_always_flags & \
				 v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
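
/*
 * Worked example (illustrative): with only CONFIG_CPU_TLB_V6 selected,
 * possible_tlb_flags == always_tlb_flags == v6wbi_tlb_flags, so
 * tlb_flag(TLB_WB) folds to the constant 1 and tlb_flag(TLB_V3_FULL)
 * to 0; the compiler can then discard the dead branches and the
 * run-time load of __cpu_tlb_flags entirely.  Only a MULTI_TLB kernel
 * ever tests the flags at run time.
 */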
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_FULL))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL))
		asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL))
		asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
	if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	const int zero = 0;
	const int asid = ASID(mm);
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_FULL))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_U_FULL))
			asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_D_FULL))
			asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc");
		if (tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_ASID))
		asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_D_ASID))
		asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc");
	if (tlb_flag(TLB_V6_I_ASID))
		asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

	if (tlb_flag(TLB_WB))
		dsb();

	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		if (tlb_flag(TLB_V3_PAGE))
			asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_U_PAGE))
			asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_D_PAGE))
			asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
		if (tlb_flag(TLB_V4_I_PAGE))
			asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");
		if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
			asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
	}

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
	}
}
static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
	const int zero = 0;
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	kaddr &= PAGE_MASK;

	if (tlb_flag(TLB_WB))
		dsb();

	if (tlb_flag(TLB_V3_PAGE))
		asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V4_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");
	if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
		asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

	if (tlb_flag(TLB_V6_U_PAGE))
		asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_D_PAGE))
		asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc");
	if (tlb_flag(TLB_V6_I_PAGE))
		asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc");

	if (tlb_flag(TLB_V6_I_FULL | TLB_V6_D_FULL |
		     TLB_V6_I_PAGE | TLB_V6_D_PAGE |
		     TLB_V6_I_ASID | TLB_V6_D_ASID)) {
		/* flush the branch target cache */
		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
		dsb();
		isb();
	}
}
/*
 *	flush_pmd_entry
 *
 *	Flush a PMD entry (word aligned, or double-word aligned) to
 *	RAM if the TLB for the CPU we are running on requires this.
 *	This is typically used when we are creating PMD entries.
 *
 *	clean_pmd_entry
 *
 *	Clean (but don't drain the write buffer) if the CPU requires
 *	these operations.  This is typically used when we are removing
 *	PMD entries.
 */
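
/*
 * Hedged usage sketch (illustrative, names assumed): code creating a
 * section mapping typically stores the new entry and then flushes it,
 * so the hardware page table walker sees it in RAM:
 *
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	*pmd = __pmd(phys | prot);
 *	flush_pmd_entry(pmd);
 *
 * clean_pmd_entry() is the counterpart for removing entries, where
 * draining the write buffer is not required.
 */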
static inline void flush_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_WB))
		dsb();
}
static inline void clean_pmd_entry(pmd_t *pmd)
{
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_DCLEAN))
		asm("mcr	p15, 0, %0, c7, c10, 1	@ flush_pmd"
			: : "r" (pmd) : "cc");

	if (tlb_flag(TLB_L2CLEAN_FR))
		asm("mcr	p15, 1, %0, c15, c9, 1  @ L2 flush_pmd"
			: : "r" (pmd) : "cc");
}
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags
/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
#ifndef CONFIG_SMP
#define flush_tlb_all		local_flush_tlb_all
#define flush_tlb_mm		local_flush_tlb_mm
#define flush_tlb_page		local_flush_tlb_page
#define flush_tlb_kernel_page	local_flush_tlb_kernel_page
#define flush_tlb_range		local_flush_tlb_range
#define flush_tlb_kernel_range	local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
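
/*
 * Note (an assumption about the SMP implementation, see
 * arch/arm/kernel/smp.c): on SMP the out-of-line flush_tlb_* versions
 * broadcast the operation to the other CPUs via IPIs, while the
 * local_* variants above only ever touch the calling CPU's TLB.
 */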
/*
 * If PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
#endif
#endif /* CONFIG_MMU */
#endif