/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
static struct {
	unsigned long mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;		/* log2 of largest supported purge page-size */
} purge;
struct ia64_ctx ia64_ctx = {
	.lock =		__SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next =		1,
	.max_ctx =	~0U
};
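/*
 * Deferred-flush flag, one per CPU: wrap_mmu_context() sets it for every
 * other online CPU instead of flushing their TLBs directly; each CPU is
 * then expected to notice the flag on its next context switch (in the
 * mmu context code, e.g. delayed_tlb_flush() in asm/mmu_context.h) and
 * run local_flush_tlb_all() itself.
 */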
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID that is supported by the boot CPU.
 */
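/*
 * Sizing sketch: each bitmap holds one bit per region ID, i.e.
 * (max_ctx + 1) / 8 bytes.  With, say, 24 implemented RID bits,
 * max_ctx is 2^24 - 1 and each bitmap takes 2 MB of bootmem.
 */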
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx + 1) >> 3);
}
/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}
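	/*
	 * flushmap collects RIDs released since the last wrap (recorded
	 * e.g. by flush_tlb_mm()); xchg() grabs and clears each word
	 * atomically, and the XOR clears the corresponding, still-set bits
	 * in bitmap, so those RIDs become allocatable again once the TLBs
	 * are flushed below.
	 */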
	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);
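	/*
	 * next is the first free RID at or above 300, limit the first
	 * in-use RID after it; the allocator (get_mmu_context()) can then
	 * hand out RIDs from the run [next, limit) without rescanning
	 * the bitmap.
	 */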
	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	static DEFINE_SPINLOCK(ptcg_lock);
	struct mm_struct *active_mm = current->active_mm;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}
	/* HW requires global serialization of ptc.ga.  */
	spin_lock(&ptcg_lock);
	{
		do {
			/*
			 * Flush ALAT entries also.
			 */
			ia64_ptcga(start, (nbits << 2));
			ia64_srlz_i();
			start += (1UL << nbits);
		} while (start < end);
	}
	spin_unlock(&ptcg_lock);
	if (mm != active_mm) {
		/* Restore the caller's previously active context. */
		activate_context(active_mm);
	}
}
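/*
 * ptc.ga sketch: each ia64_ptcga(start, nbits << 2) purges one 2^nbits-byte,
 * 2^nbits-aligned region on all processors; the shift by 2 puts the log2
 * page size into bits 7:2 of the operand, where the purge instructions
 * expect it.  Flushing, say, 64 KB with 16 KB purges therefore takes four
 * serialized ptc.ga/srlz.i pairs under ptcg_lock.
 */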
void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
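/*
 * The loop above follows the PAL_PTCE_INFO recipe: starting at ptce_base,
 * issue count0 * count1 ptc.e operations, advancing the address by stride1
 * after every ptc.e and by an extra stride0 after each inner loop; walking
 * all of those addresses purges the entire local TLB.
 */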
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;
#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif
	nbits = ia64_fls(size + 0xfff);
	while (unlikely(((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);
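	/*
	 * Example: for a 16 KB range, ia64_fls(size + 0xfff) is 14; if 16 KB
	 * purges are supported (bit 14 set in purge.mask), nbits stays 14 and
	 * start is aligned down to a 16 KB boundary, so the loop below issues
	 * one ptc.l (or two, if the range straddles a 16 KB boundary).
	 */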
	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
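	/*
	 * Reaching this point means the mm is live only on this CPU, so a
	 * local ptc.l per purge-size chunk suffices; every other case was
	 * handed to platform_global_tlb_purge() above (ptc.ga based or
	 * IPI based, depending on the platform).
	 */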
	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
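/*
 * Typical caller sketch (from the arch flush interface, not this file): a
 * single-page flush such as flush_tlb_page() ends up doing something like
 *
 *	flush_tlb_range(vma, addr & PAGE_MASK, (addr & PAGE_MASK) + PAGE_SIZE);
 *
 * i.e. a page-aligned, PAGE_SIZE-long range.
 */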
void __devinit
ia64_tlb_init (void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);
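	/*
	 * 0x115557000 sets bits 12-14, 16, 18, 20, 22, 24, 26, 28 and 32,
	 * i.e. the architected purge page sizes 4K/8K/16K, 64K, 256K, 1M,
	 * 4M, 16M, 64M, 256M and 4G; ia64_fls() of that mask makes
	 * purge.max_bits 32 in the fallback case.
	 */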
	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
}