/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
# ifndef _ASM_CACHEFLUSH_H
# define _ASM_CACHEFLUSH_H
/* Keep includes the same across arches. */
# include <linux/mm.h>
# include <asm/cpu-features.h>
/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flush a range of instructions
 *  - flush_dcache_page(pg) flushes (wback & invalidates) a page for dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_cache_sigtramp() flush signal trampoline
 *  - flush_icache_all() flush the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 */
/* Flush the entire cache hierarchy (function pointers, set up per-CPU at boot). */
extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);

/* Flush all cache lines belonging to the address space @mm. */
extern void (*flush_cache_mm)(struct mm_struct *mm);

/*
 * Nothing to do when duplicating an mm at fork time on this port;
 * evaluate @mm once so the argument is not flagged as unused.
 */
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)

/* Flush a user virtual address range, or a single page, within @vma. */
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma,
	unsigned long page, unsigned long pfn);
extern void __flush_dcache_page ( struct page * page ) ;
static inline void flush_dcache_page ( struct page * page )
{
2006-08-12 19:40:08 +04:00
if ( cpu_has_dc_aliases | | ! cpu_has_ic_fills_f_dc )
2005-04-17 02:20:36 +04:00
__flush_dcache_page ( page ) ;
}
/*
 * These mapping-level D-cache hooks are intentionally no-ops on this
 * port; the macro form keeps the call sites valid statements.
 */
# define flush_dcache_mmap_lock(mapping) do { } while (0)
# define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);

/*
 * Flush an anonymous page that is mapped at user address @vmaddr.
 * Only anonymous pages need this, and only when the D-cache can alias.
 */
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
/*
 * Intentionally empty: no per-page I-cache maintenance is done here on
 * this port (I-cache coherency is presumably handled by the range/page
 * flush hooks above — confirm against the arch cache code).
 */
static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}
/* Flush the I-cache for the virtual address range [start, end). */
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
/* As above, but restricted to the calling CPU. */
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_cache_vmap)(void);

/*
 * Cache maintenance after a vmap/vmalloc mapping is set up; a flush is
 * only required when the D-cache can alias.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}
extern void (*__flush_cache_vunmap)(void);

/*
 * Cache maintenance before a vmap/vmalloc mapping is torn down.
 * Nothing to do unless the D-cache can alias.
 */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;
	__flush_cache_vunmap();
}
/*
 * Copy data to/from a user page while keeping the caches coherent with
 * the user mapping at @vaddr.
 */
extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

/* Flush the signal trampoline written at @addr. */
extern void (*flush_cache_sigtramp)(unsigned long addr);

/* Flush the entire instruction cache. */
extern void (*flush_icache_all)(void);

/* Flush one page from the data cache (local-CPU and generic variants). */
extern void (*local_flush_data_cache_page)(void *addr);
extern void (*flush_data_cache_page)(unsigned long addr);
/*
 * This flag is used to indicate that the page pointed to by a pte is
 * dirty and requires cleaning before returning it to the user.
 * It aliases the generic PG_arch_1 page flag reserved for arch use.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)
/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

/*
 * Map @page at a kernel virtual address paired with user address @addr
 * (NOTE(review): presumably colour-matched to avoid D-cache aliases —
 * confirm against the arch mm code); release with kunmap_coherent().
 */
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
# endif /* _ASM_CACHEFLUSH_H */