/* 2005-04-17 02:20:36 +04:00 */
# ifndef _PARISC_TLBFLUSH_H
# define _PARISC_TLBFLUSH_H
/* TLB flushing routines.... */
# include <linux/mm.h>
# include <asm/mmu_context.h>
/* 2005-10-22 06:40:24 +04:00 */
/* This is for the serialisation of PxTLB broadcasts.  At least on the
 * N class systems, only one PxTLB inter processor broadcast can be
 * active at any one time on the Merced bus.  This tlb purge
 * synchronisation is fairly lightweight and harmless so we activate
 * it on all SMP systems not just the N class.  We also need to have
 * preemption disabled on uniprocessor machines, and spin_lock does that
 * nicely.
 */
/* 2005-10-22 06:40:24 +04:00 */
extern spinlock_t pa_tlb_lock ;
# define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
# define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
/* 2005-04-17 02:20:36 +04:00 */
extern void flush_tlb_all ( void ) ;
/* 2006-01-11 04:47:49 +03:00 */
extern void flush_tlb_all_local ( void * ) ;
/* 2005-04-17 02:20:36 +04:00 */
/*
 * flush_tlb_mm()
 *
 * XXX This code is NOT valid for HP-UX compatibility processes,
 * (although it will probably work 99% of the time).  HP-UX
 * processes are free to play with the space id's and save them
 * over long periods of time, etc. so we have to preserve the
 * space and just flush the entire tlb.  We need to check the
 * personality in order to do that, but the personality is not
 * currently being set correctly.
 *
 * Of course, Linux processes could do the same thing, but
 * we don't support that (and the compilers, dynamic linker,
 * etc. do not do that).
 */
/* 2006-12-12 03:07:51 +03:00 */
static inline void flush_tlb_mm ( struct mm_struct * mm )
{
2007-02-18 22:35:45 +03:00
BUG_ON ( mm = = & init_mm ) ; /* Should never happen */
# ifdef CONFIG_SMP
flush_tlb_all ( ) ;
# else
if ( mm ) {
if ( mm - > context ! = 0 )
free_sid ( mm - > context ) ;
mm - > context = alloc_sid ( ) ;
if ( mm = = current - > active_mm )
load_context ( mm - > context ) ;
}
# endif
2005-04-17 02:20:36 +04:00
}
/*
 * flush_tlb_pgtables() - hook invoked when page-table pages are torn
 * down.  Intentionally a no-op on this architecture.
 *
 * Changed from the legacy "extern __inline__" to "static inline":
 * GNU89 extern inline may emit an external definition in every
 * translation unit including this header (multiple-definition link
 * errors) and its meaning flips under C99/gnu99 inline semantics,
 * whereas static inline behaves identically in all modes and matches
 * the other inlines in this file.
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* nothing to do */
}
static inline void flush_tlb_page ( struct vm_area_struct * vma ,
unsigned long addr )
{
/* For one page, it's not worth testing the split_tlb variable */
mb ( ) ;
mtsp ( vma - > vm_mm - > context , 1 ) ;
purge_tlb_start ( ) ;
pdtlb ( addr ) ;
pitlb ( addr ) ;
purge_tlb_end ( ) ;
}
/* 2006-12-12 16:51:54 +03:00 */
void __flush_tlb_range ( unsigned long sid ,
unsigned long start , unsigned long end ) ;
/* 2005-04-17 02:20:36 +04:00 */
/* 2006-12-12 16:51:54 +03:00 */
# define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
/* 2005-04-17 02:20:36 +04:00 */
/* 2006-12-12 16:51:54 +03:00 */
# define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
/* 2005-04-17 02:20:36 +04:00 */
# endif