#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE

#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;

#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb-radix.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64-bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd.pd & HUGEPD_ADDR_MASK);
}
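/*
 * Encoding sketch (inferred from the masks used here, so treat it as an
 * editor's assumption rather than authoritative documentation): the
 * hugepd word carries the physical address of the hugepte page in the
 * HUGEPD_ADDR_MASK bits and the MMU page size index in bits 2-5, which
 * is why hugepd_mmu_psize() below shifts right by 2 and the
 * BUILD_BUG_ON() above demands that every psize index fit in 4 bits.
 */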
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
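/*
 * Editor's note (an assumption, not stated by this header): on the hash
 * MMU these are deliberate no-ops because the TLB entry is invalidated
 * when the hashed PTE itself is cleared, so only the radix MMU needs an
 * explicit per-page hugetlb flush here.
 */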
#else
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}
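/*
 * Decoding sketch (again inferred from the expressions above): in this
 * format the hugepd word is essentially the kernel pointer to the
 * hugepte page, with the page shift packed into the low
 * HUGEPD_SHIFT_MASK bits and the PD_HUGE bit OR'd back in when the
 * pointer is reconstructed.
 */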
#endif /* CONFIG_PPC_BOOK3S_64 */
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical. So for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
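/*
 * Worked example with made-up numbers: for pdshift = 30 and a 16MB huge
 * page (hugepd_shift() = 24), idx is addr bits [29:24], so one hugepd
 * covers 2^(30 - 24) = 64 hugepte slots.
 */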
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
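/*
 * Background gloss (editor's assumption, not from this header): with MM
 * slices each slice of the address space admits a single page size, so
 * the out-of-line version has to consult the slice map; without slices
 * no range is ever hugepage-only.
 */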
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);

#ifndef CONFIG_PPC_BOOK3S_64
/* Book3S 64 defines a static inline flush_hugetlb_page() above. */
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c
 * needs to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
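/*
 * Worked example with a hypothetical 16MB hstate: huge_page_mask(h) is
 * ~0xffffffUL, so any addr or len with one of the low 24 bits set fails
 * the alignment check and returns -EINVAL.
 */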
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
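/*
 * In both branches above, pte_update() atomically clears every PTE bit
 * (clear mask ~0UL, set mask 0) and returns the old value; the trailing
 * 1 in the 64-bit call is, as far as the call site shows, its "huge"
 * argument.
 */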
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry. Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#else /* ! CONFIG_HUGETLB_PAGE */

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif
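/*
 * "gpages" are gigantic pages - presumably too large for the buddy
 * allocator - which is why FSL Book3E must carve them out of memblock
 * before the page allocator is up.
 */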
#endif /* _ASM_POWERPC_HUGETLB_H */