/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */
#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>
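
/*
 * Everything in this file is a generic fallback: an architecture can
 * provide its own implementation and opt out of the version here by
 * defining the corresponding __HAVE_ARCH_* macro (or, in the case of
 * pmdp_collapse_flush, the symbol itself).
 */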

/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */
void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache. This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif
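
/*
 * A typical caller pattern, following the comment above (a sketch only;
 * the real call sites live in the arch fault handlers and mm/memory.c):
 *
 *      entry = pte_mkyoung(entry);
 *      if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *              update_mmu_cache(vma, address, ptep);
 */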

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
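/*
 * Test-and-clear the "young" (accessed) bit, flushing the TLB entry
 * only when the bit was actually set.
 */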
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
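/*
 * Clear the pte and flush the stale TLB entry.  The flush is skipped when
 * the old pte was not accessible (see pte_accessible()), since then no
 * TLB entry needs to be shot down for it.
 */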
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
/*
 * ARCHes with special requirements for evicting THP backing TLB entries can
 * implement this. Otherwise also, it can help optimize the normal TLB flush
 * in the THP regime: the stock flush_tlb_range() typically has an
 * optimization to nuke the entire TLB if the flush span is greater than a
 * threshold, which will likely be true for a single huge page. Thus a
 * single THP flush will invalidate the entire TLB, which is not desirable.
 * e.g. see arch/arc: flush_pmd_tlb_range
 */
#define flush_pmd_tlb_range(vma, addr, end)     flush_tlb_range(vma, addr, end)
#endif
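
/*
 * An architecture that needs different behaviour supplies its own
 * flush_pmd_tlb_range() and defines __HAVE_ARCH_FLUSH_PMD_TLB_RANGE in
 * its headers, roughly along these lines (a sketch, not the actual arc
 * code):
 *
 *      #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 *      void flush_pmd_tlb_range(struct vm_area_struct *vma,
 *                               unsigned long start, unsigned long end);
 */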

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
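/*
 * PMD analogue of ptep_set_access_flags() above; the address must be
 * huge-page aligned, as the VM_BUG_ON() asserts.
 */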
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
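/*
 * PMD analogue of ptep_clear_flush_young(): test-and-clear the accessed
 * bit on a huge pmd and flush the huge-page TLB range if it was set.
 */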
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
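/*
 * Clear a huge pmd entry and flush its TLB range, returning the old
 * value.  Only valid on a mapped huge page (transparent huge or devmap),
 * as the second VM_BUG_ON() asserts.
 */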
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
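/*
 * Stash a preallocated page table under the pmd so that it can be reused
 * later (e.g. when the huge mapping is split back to normal ptes).
 * Deposited tables are kept on a per-pmd list; the caller must hold the
 * pmd lock, as asserted below.  pgtable_trans_huge_withdraw() is the
 * matching operation that takes a table back off the list.
 */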
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument so destroys page coloring of some arch */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
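/*
 * Transiently mark the pmd not-present and flush the huge-page TLB range,
 * so the hardware cannot set up fresh translations while the entry is
 * being changed by the caller.
 */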
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;

        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif

#ifndef pmdp_collapse_flush
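/*
 * Used when collapsing a range of small pages into a huge page: clear the
 * pmd that points to the old page table and flush its TLB range.  Note
 * the pmd here is a page-table pointer, not a huge mapping, hence the
 * inverted VM_BUG_ON() compared to pmdp_huge_clear_flush().
 */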
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * pmd and hugepage pte formats are the same, so we can reuse
         * pmdp_huge_get_and_clear() here.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */