#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H

/* TLB flushing routines.... */

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
2005-10-22 06:40:24 +04:00
/* This is for the serialisation of PxTLB broadcasts. At least on the
* N class systems , only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus . This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
2005-11-18 00:44:14 +03:00
* it on all SMP systems not just the N class . We also need to have
* preemption disabled on uniprocessor machines , and spin_lock does that
* nicely .
*/
2005-10-22 06:40:24 +04:00
extern spinlock_t pa_tlb_lock ;
# define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
# define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)
/* Flush the entire TLB on all CPUs, and on the local CPU only. */
extern void flush_tlb_all(void);
extern void flush_tlb_all_local(void *);
2005-04-17 02:20:36 +04:00
/*
* flush_tlb_mm ( )
*
* XXX This code is NOT valid for HP - UX compatibility processes ,
* ( although it will probably work 99 % of the time ) . HP - UX
* processes are free to play with the space id ' s and save them
* over long periods of time , etc . so we have to preserve the
* space and just flush the entire tlb . We need to check the
* personality in order to do that , but the personality is not
* currently being set correctly .
*
* Of course , Linux processes could do the same thing , but
* we don ' t support that ( and the compilers , dynamic linker ,
* etc . do not do that ) .
*/
static inline void flush_tlb_mm ( struct mm_struct * mm )
{
BUG_ON ( mm = = & init_mm ) ; /* Should never happen */
# ifdef CONFIG_SMP
flush_tlb_all ( ) ;
# else
if ( mm ) {
if ( mm - > context ! = 0 )
free_sid ( mm - > context ) ;
mm - > context = alloc_sid ( ) ;
if ( mm = = current - > active_mm )
load_context ( mm - > context ) ;
}
# endif
}
/* No-op: PA-RISC keeps no TLB entries for page-table pages themselves,
 * so there is nothing to flush when page tables are torn down.
 * (static inline replaces the obsolete "extern __inline__" GCC-ism,
 * which has different linkage semantics and emits an external symbol
 * when compiled without optimisation.) */
static inline void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
static inline void flush_tlb_page ( struct vm_area_struct * vma ,
unsigned long addr )
{
/* For one page, it's not worth testing the split_tlb variable */
mb ( ) ;
mtsp ( vma - > vm_mm - > context , 1 ) ;
purge_tlb_start ( ) ;
pdtlb ( addr ) ;
pitlb ( addr ) ;
purge_tlb_end ( ) ;
}
static inline void flush_tlb_range ( struct vm_area_struct * vma ,
unsigned long start , unsigned long end )
{
unsigned long npages ;
npages = ( ( end - ( start & PAGE_MASK ) ) + ( PAGE_SIZE - 1 ) ) > > PAGE_SHIFT ;
2005-10-22 06:40:07 +04:00
if ( npages > = 512 ) /* 2MB of space: arbitrary, should be tuned */
2005-04-17 02:20:36 +04:00
flush_tlb_all ( ) ;
else {
mtsp ( vma - > vm_mm - > context , 1 ) ;
2005-10-22 06:40:07 +04:00
purge_tlb_start ( ) ;
2005-04-17 02:20:36 +04:00
if ( split_tlb ) {
while ( npages - - ) {
pdtlb ( start ) ;
pitlb ( start ) ;
start + = PAGE_SIZE ;
}
} else {
while ( npages - - ) {
pdtlb ( start ) ;
start + = PAGE_SIZE ;
}
}
2005-10-22 06:40:07 +04:00
purge_tlb_end ( ) ;
2005-04-17 02:20:36 +04:00
}
}
/* Kernel-space ranges share one space id; just flush everything. */
#define flush_tlb_kernel_range(start, end) flush_tlb_all()

#endif /* _PARISC_TLBFLUSH_H */