#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>

extern struct kmem_cache *hugepte_cache;
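
/*
 * Informal summary (derived from the accessors below, not a formal spec of
 * the format): a hugepd_t packs a pointer to a page of huge PTEs together
 * with the page-size shift of the mapping.  hugepd_shift() reads the shift
 * from the low HUGEPD_SHIFT_MASK bits of hpd.pd, and hugepd_page() recovers
 * the hugepte page address from the remaining bits, OR-ing PD_HUGE back in.
 */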
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return hpd.pd & HUGEPD_SHIFT_MASK;
}

static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte.  Just use the first one since they're
	 * all identical.  So for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(*hpdp);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp);
#endif

	return dir + idx;
}
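
/*
 * Worked example for hugepte_offset() above (hypothetical numbers): with a
 * 16M huge page (hugepd_shift() == 24) hanging off a directory entry that
 * covers 256M (pdshift == 28), an address 0x3000000 into that region gives
 * idx = 0x3000000 >> 24 = 3, i.e. the fourth huge PTE in the page returned
 * by hugepd_page().  On FSL BookE, idx stays 0 as described above.
 */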

pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);
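
/*
 * Hypothetical caller sketch (names are illustrative only): look up the huge
 * PTE for an address and learn its page size in one call, e.g.
 *
 *	unsigned shift;
 *	pte_t *ptep = huge_pte_offset_and_shift(mm, addr, &shift);
 *	unsigned long sz = ptep ? (1UL << shift) : 0;
 */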

void flush_dcache_icache_hugepage(struct page *page);

#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif

void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c
 */
#define vma_mmu_pagesize vma_mmu_pagesize

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
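
/*
 * Example for prepare_hugepage_range() above (hypothetical sizes): with a
 * 16M huge page size, huge_page_mask(h) is ~(16M - 1), so addr = 0x10000000
 * and len = 0x2000000 are accepted, while any addr or len with one of the
 * low 24 bits set is rejected with -EINVAL.
 */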

static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}

static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
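
/*
 * Note on huge_ptep_get_and_clear() above: the 32-bit and 64-bit pte_update()
 * helpers take different argument lists, but in both cases the ~0UL clear
 * mask wipes the whole PTE and the previous value is returned.  The trailing
 * 1 on the 64-bit call is assumed here to be its "huge" flag.
 */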

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);
}

static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry.  Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_FSL_BOOK3E)
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */