#ifndef __ASM_SH_CACHEFLUSH_H
#define __ASM_SH_CACHEFLUSH_H

#ifdef __KERNEL__

#include <linux/mm.h>

/*
 * Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *
 *  - flush_dcache_page(pg) flushes (wback & invalidates) a page for dcache
 *  - flush_icache_range(start, end) flushes (invalidates) a range for icache
 *  - flush_icache_page(vma, pg) flushes (invalidates) a page for icache
 *  - flush_cache_sigtramp(vaddr) flushes the signal trampoline
 */
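
/*
 * Illustrative usage sketch (not part of this header): code that patches
 * kernel text at runtime typically writes the new instructions first and
 * then invalidates the icache over the modified span, along the lines of:
 *
 *	memcpy(addr, new_insns, len);
 *	flush_icache_range((unsigned long)addr, (unsigned long)addr + len);
 *
 * 'addr', 'new_insns' and 'len' are hypothetical names used only for this
 * example.
 */
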
extern void (*local_flush_cache_all)(void *args);
extern void (*local_flush_cache_mm)(void *args);
extern void (*local_flush_cache_dup_mm)(void *args);
extern void (*local_flush_cache_page)(void *args);
extern void (*local_flush_cache_range)(void *args);
extern void (*local_flush_dcache_page)(void *args);
extern void (*local_flush_icache_range)(void *args);
extern void (*local_flush_icache_page)(void *args);
extern void (*local_flush_cache_sigtramp)(void *args);

static inline void cache_noop(void *args) { }

extern void (*__flush_wback_region)(void *start, int size);
extern void (*__flush_purge_region)(void *start, int size);
extern void (*__flush_invalidate_region)(void *start, int size);
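
/*
 * Minimal sketch (illustrative, assuming DMA-API style direction values;
 * the authoritative selection lives in the arch DMA code) of how the three
 * region helpers above are typically chosen for device I/O maintenance:
 *
 *	switch (dir) {
 *	case DMA_FROM_DEVICE:		// device writes memory: drop stale lines
 *		__flush_invalidate_region(vaddr, size);
 *		break;
 *	case DMA_TO_DEVICE:		// device reads memory: write dirty lines back
 *		__flush_wback_region(vaddr, size);
 *		break;
 *	case DMA_BIDIRECTIONAL:		// both: write back and invalidate
 *		__flush_purge_region(vaddr, size);
 *		break;
 *	}
 */
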
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_dup_mm(struct mm_struct *mm);
extern void flush_cache_page(struct vm_area_struct *vma,
			     unsigned long addr, unsigned long pfn);
extern void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1

extern void flush_dcache_page(struct page *page);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma,
			      struct page *page);
extern void flush_cache_sigtramp(unsigned long address);

/* Argument block passed to the local_flush_*() hooks above. */
struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *page, unsigned long);

static inline void flush_anon_page(struct vm_area_struct *vma,
				   struct page *page, unsigned long vmaddr)
{
	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
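
/*
 * Illustrative caller sketch (hypothetical, mirroring the pattern used by
 * generic code that touches an anonymous user page through the kernel
 * mapping):
 *
 *	flush_anon_page(vma, page, vmaddr);
 *	flush_dcache_page(page);
 *	... access the page contents via kmap() ...
 */
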
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	__flush_wback_region(addr, size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	__flush_invalidate_region(addr, size);
}
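
/*
 * Typical pattern (illustrative only) for I/O on a buffer that is accessed
 * both through a vmap() alias and through its underlying pages:
 *
 *	flush_kernel_vmap_range(vaddr, size);		// before the device reads the data
 *	... perform the I/O against the underlying pages ...
 *	invalidate_kernel_vmap_range(vaddr, size);	// before the CPU reads data the device wrote
 */
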
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_dcache_page(page);
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

#define flush_cache_vmap(start, end)		local_flush_cache_all(NULL)
#define flush_cache_vunmap(start, end)		local_flush_cache_all(NULL)

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

void kmap_coherent_init(void);
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);
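
/*
 * Sketch of the usual coherent-kmap pattern (illustrative; 'vaddr', 'src'
 * and 'len' stand in for a caller such as copy_to_user_page()): map the
 * page at a kernel address whose cache colour matches the user mapping,
 * touch it, then unmap:
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent(vto);
 */
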
#define PG_dcache_clean	PG_arch_1

void cpu_cache_init(void);

#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */