#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */
#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
#define vmemmap		((struct page *)VMEMMAP_BASE)
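
/*
 * Illustrative sketch (not part of the original header): given the
 * carving above, a mapped I/O address can be classified against the
 * ioremap window with a plain range check. The helper name is made up
 * for illustration.
 */
#ifndef __ASSEMBLY__
static inline int example_addr_in_ioremap_range(unsigned long addr)
{
	/* ioremap() hands out addresses in [IOREMAP_BASE, IOREMAP_END) */
	return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}
#endif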

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
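
/*
 * Usage sketch (illustrative, not part of the original header): the
 * iterator pair above is used bracket-style, e.g. when invalidating
 * the hash slots backing one Linux PTE:
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hidx = __rpte_to_hidx(rpte, index);
 *		... invalidate the HPTE for this subpage ...
 *	} pte_iterate_hashed_end();
 *
 * In this default implementation the "loop" body runs exactly once,
 * since there is no sub-page concept.
 */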

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
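
/*
 * Note (illustrative, not part of the original header): pud_write()
 * and pgd_write() reuse the pte helpers by viewing the upper-level
 * entry's bits as a pte, e.g.
 *
 *	pud_write(pud) -> pte_write(__pte(pud_val(pud)))
 *
 * The pmd accessors further down apply the same pte-reuse trick.
 */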

static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd(pte_val(pte));
}

extern struct page *pgd_page(pgd_t pgd);

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *)pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr)	\
	(((pmd_t *)pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr)	\
	(((pte_t *)pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
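
/*
 * Walk sketch (illustrative, not part of the original header): the
 * offset macros compose into the usual four-level descent, e.g. for a
 * kernel virtual address:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */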

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	} while (0)
/*
 * On the pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT;
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PTE_RPN_SHIFT) & PTE_RPN_MASK) })
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * swap type and offset we get from swap and convert that to pte to find a
 * matching pte in linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
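
/*
 * Round-trip sketch (illustrative, not part of the original header):
 * encoding a (type, offset) pair and parking it in a pte:
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);       (sets _PAGE_PTE)
 *	swp_entry_t back = __pte_to_swp_entry(pte);  (clears _PAGE_PTE)
 *
 * __swp_type(back) and __swp_offset(back) recover the original pair:
 * the type lives just above _PAGE_BIT_SWAP_TYPE and the offset is kept
 * within PTE_RPN_MASK, so the two never collide.
 */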

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}

static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
/*
 * has_transparent_hugepage() is overridden here: on powerpc the answer
 * depends on the MMU configuration probed at runtime, not just on
 * CONFIG_TRANSPARENT_HUGEPAGE, so the generic compile-time default
 * cannot be used.
 */
#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
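
/*
 * Paraphrased sketch (illustrative, not from this header) of how the
 * self-referential define above is consumed: asm-generic/pgtable.h
 * only installs its compile-time fallback when no arch override is
 * visible, roughly:
 *
 *	#ifndef has_transparent_hugepage
 *	#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 *	#define has_transparent_hugepage() 1
 *	#else
 *	#define has_transparent_hugepage() 0
 *	#endif
 *	#endif
 */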

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use pgtable to store per pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable
	 */
	return true;
}
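
/*
 * Caller-side sketch (illustrative, paraphrasing mm/huge_memory.c):
 * when a huge pmd is moved, the deposited page table has to follow it,
 * roughly:
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 */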

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */