2005-04-16 15:20:36 -07:00
# ifndef _PARISC_CACHEFLUSH_H
# define _PARISC_CACHEFLUSH_H
# include <linux/mm.h>
2010-04-11 16:36:14 +00:00
# include <linux/uaccess.h>
2011-04-15 12:37:22 -05:00
# include <asm/tlbflush.h>
2005-04-16 15:20:36 -07:00
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */
2006-12-12 05:51:54 -08:00
/* Internal implementation */
void flush_data_cache_local ( void * ) ; /* flushes local data-cache only */
void flush_instruction_cache_local ( void * ) ; /* flushes local code-cache only */
2005-04-16 15:20:36 -07:00
# ifdef CONFIG_SMP
2006-12-12 05:51:54 -08:00
void flush_data_cache ( void ) ; /* flushes data-cache only (all processors) */
void flush_instruction_cache ( void ) ; /* flushes i-cache only (all processors) */
2005-04-16 15:20:36 -07:00
# else
2006-12-12 05:51:54 -08:00
# define flush_data_cache() flush_data_cache_local(NULL)
# define flush_instruction_cache() flush_instruction_cache_local(NULL)
2005-04-16 15:20:36 -07:00
# endif
2006-12-12 17:14:57 +00:00
# define flush_cache_dup_mm(mm) flush_cache_mm(mm)
2006-12-12 05:51:54 -08:00
void flush_user_icache_range_asm ( unsigned long , unsigned long ) ;
void flush_kernel_icache_range_asm ( unsigned long , unsigned long ) ;
void flush_user_dcache_range_asm ( unsigned long , unsigned long ) ;
void flush_kernel_dcache_range_asm ( unsigned long , unsigned long ) ;
void flush_kernel_dcache_page_asm ( void * ) ;
void flush_kernel_icache_page ( void * ) ;
2006-12-15 09:29:39 -07:00
void flush_user_dcache_range ( unsigned long , unsigned long ) ;
void flush_user_icache_range ( unsigned long , unsigned long ) ;
2005-04-16 15:20:36 -07:00
2006-12-12 05:51:54 -08:00
/* Cache flush operations */
2005-04-16 15:20:36 -07:00
2006-12-12 05:51:54 -08:00
void flush_cache_all_local ( void ) ;
void flush_cache_all ( void ) ;
void flush_cache_mm ( struct mm_struct * mm ) ;
2005-04-16 15:20:36 -07:00
2011-01-20 12:54:18 -06:00
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);

/* Write back a page's D-cache lines through its kernel virtual address. */
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
2006-12-12 05:51:54 -08:00
/* Write back [start, start+size) from the D-cache.
 * NOTE: no trailing semicolon in the expansion — callers supply their own,
 * and a stray one breaks "if (x) flush_kernel_dcache_range(...); else ..."
 * by terminating the if-body early. */
#define flush_kernel_dcache_range(start,size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
2010-01-25 11:42:21 -06:00
/* Write back a vmap'd kernel range from the D-cache so the data is
 * visible through other aliases of the same physical pages. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long addr = (unsigned long) vaddr;

	flush_kernel_dcache_range_asm(addr, addr + size);
}
static inline void invalidate_kernel_vmap_range ( void * vaddr , int size )
{
2011-01-20 12:54:18 -06:00
unsigned long start = ( unsigned long ) vaddr ;
void * cursor = vaddr ;
for ( ; cursor < vaddr + size ; cursor + = PAGE_SIZE ) {
struct page * page = vmalloc_to_page ( cursor ) ;
if ( test_and_clear_bit ( PG_dcache_dirty , & page - > flags ) )
flush_kernel_dcache_page ( page ) ;
}
flush_kernel_dcache_range_asm ( start , start + size ) ;
2010-01-25 11:42:21 -06:00
}
2005-04-16 15:20:36 -07:00
# define flush_cache_vmap(start, end) flush_cache_all()
# define flush_cache_vunmap(start, end) flush_cache_all()
2009-11-26 09:16:19 +01:00
# define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
2005-04-16 15:20:36 -07:00
extern void flush_dcache_page ( struct page * page ) ;
/* Serialise against concurrent changes to the mapping's page tree
 * while walking it for a dcache flush. */
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
2005-04-16 15:20:36 -07:00
/* Release the lock taken by flush_dcache_mmap_lock(). */
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)
2005-04-16 15:20:36 -07:00
2006-12-12 05:51:54 -08:00
/* Flush a page for instruction fetch: write back the D-cache first so
 * new code reaches memory, then invalidate the I-cache copy. */
# define flush_icache_page(vma,page) do { \
flush_kernel_dcache_page ( page ) ; \
flush_kernel_icache_page ( page_address ( page ) ) ; \
} while ( 0 )
2005-04-16 15:20:36 -07:00
2006-12-12 05:51:54 -08:00
/* Flush [s, e) for instruction fetch: D-cache writeback, then
 * I-cache invalidate, in that order. */
# define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm ( s , e ) ; \
flush_kernel_icache_range_asm ( s , e ) ; \
} while ( 0 )
2005-04-16 15:20:36 -07:00
/* Copy data into a user page via its kernel mapping: flush the user
 * alias first, copy, then write back the kernel alias so the user
 * mapping sees the new contents. */
# define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page ( vma , vaddr , page_to_pfn ( page ) ) ; \
memcpy ( dst , src , len ) ; \
flush_kernel_dcache_range_asm ( ( unsigned long ) dst , ( unsigned long ) dst + len ) ; \
} while ( 0 )
/* Copy data out of a user page via its kernel mapping: flush the user
 * alias first so the copy reads up-to-date data. */
# define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page ( vma , vaddr , page_to_pfn ( page ) ) ; \
memcpy ( dst , src , len ) ; \
} while ( 0 )
2006-12-12 05:51:54 -08:00
void flush_cache_page ( struct vm_area_struct * vma , unsigned long vmaddr , unsigned long pfn ) ;
void flush_cache_range ( struct vm_area_struct * vma ,
unsigned long start , unsigned long end ) ;
2006-01-13 13:21:06 -07:00
2010-12-22 10:22:11 -06:00
/* defined in pacache.S exported in cache.c used by flush_anon_page */
void flush_dcache_page_asm ( unsigned long phys_addr , unsigned long vaddr ) ;
2006-12-12 05:51:54 -08:00
# define ARCH_HAS_FLUSH_ANON_PAGE
2006-03-22 08:28:59 -07:00
/* Anonymous pages have a user-space alias the kernel address does not
 * cover: drop the user TLB entry and flush through the page's physical
 * address paired with the user virtual address. */
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (!PageAnon(page))
		return;

	flush_tlb_page(vma, vmaddr);
	flush_dcache_page_asm(page_to_phys(page), vmaddr);
}
2006-01-13 13:21:06 -07:00
# ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro ( void ) ;
2005-04-16 15:20:36 -07:00
# endif
2006-01-13 13:21:06 -07:00
2006-08-23 09:00:04 -07:00
# ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 needs this */
2009-04-02 02:40:41 +00:00
# include <asm/kmap_types.h>
2006-08-23 09:00:04 -07:00
# define ARCH_HAS_KMAP
void kunmap_parisc ( void * addr ) ;
/* No remapping is needed: return the page's existing kernel address.
 * kmap() may sleep, hence the might_sleep() check. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

/* Tear-down (including any flushing) is handled by kunmap_parisc(). */
#define kunmap(page) kunmap_parisc(page_address(page))
2010-10-28 10:14:41 -05:00
/* Atomic map: disable pagefaults for the duration of the mapping and
 * hand back the page's existing kernel address. */
static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}
2006-08-23 09:00:04 -07:00
2010-10-28 10:14:41 -05:00
/* Atomic unmap: let kunmap_parisc() do its tear-down, then re-enable
 * pagefaults (mirror image of __kmap_atomic()). */
static inline void __kunmap_atomic(void *addr)
{
	kunmap_parisc(addr);
	pagefault_enable();
}
2006-08-23 09:00:04 -07:00
2010-10-28 10:14:41 -05:00
/* The prot/pfn variants need no special handling here; both fall
 * back to the plain kmap_atomic() path. */
# define kmap_atomic_prot(page, prot) kmap_atomic(page)
# define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
2006-08-23 09:00:04 -07:00
# define kmap_atomic_to_page(ptr) virt_to_page(ptr)
# endif
2006-01-13 13:21:06 -07:00
# endif /* _PARISC_CACHEFLUSH_H */