/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code. */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
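
/* Flush every virtual address queued in this CPU's batch.  If the
 * address space still owns a valid hardware context, the queued
 * entries are shot down (via cross-call on SMP); either way the
 * batch ends up empty.
 */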
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}

		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
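
/* Queue one virtual address for a deferred TLB flush.  Bit 0 of the
 * stored address records whether the mapping was executable.  The
 * batch is flushed early if it already holds entries for a different
 * mm, and again once it fills up to TLB_BATCH_NR entries.
 */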
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;
	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}
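
/* Called as a user PTE is being torn down.  When the old PTE was
 * dirty and maps a real file page whose kernel-side address differs
 * from the user address in bit 13 (a D-cache alias on these chips;
 * the check is skipped on hypervisor platforms), the page is flushed
 * from the D-cache first.  The TLB flush itself is only queued when
 * this is not a full-mm teardown.
 */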
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
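/* Walk the base-page PTEs backing one huge-page-sized region and
 * queue a TLB flush for every valid entry found.
 */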
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}
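
/* Install a PMD and keep the per-mm count of huge PTE mappings up to
 * date; hugetlb_setup() runs when that count reaches one.  TLB
 * entries for whatever the PMD previously mapped are queued for
 * flushing.
 */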
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;
		if (mm->context.huge_pte_count == 1)
			hugetlb_setup(mm);
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}
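
/* Stash a preallocated page table for later use under a huge PMD.
 * The page table page itself doubles as a list_head, chaining the
 * deposited tables off of mm->pmd_huge_pte.
 */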
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}
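
/* Take back a deposited page table, unlinking it from the list and
 * zeroing the two PTE slots that served as list linkage before it is
 * handed back to the caller.
 */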
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */