/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;
	/* Virtual page frame number of @address. */
	unsigned long vpfn = address / PAGE_SIZE;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry.  Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif
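
/*
 * Illustrative sketch, not compiled (note the #if 0, matching the testing
 * block above): one way a simplified walker might use pud_huge() and
 * pmd_huge() to stop descending once it reaches a leaf huge entry.  The
 * locking and the real follow_page_mask() plumbing are omitted, and the
 * helper name is made up for this example.
 */
#if 0
static int addr_maps_huge_page(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return 0;
	pud = pud_offset(pgd, addr);
	if (pud_huge(*pud))		/* 1GB leaf entry */
		return 1;
	if (pud_none(*pud))
		return 0;
	pmd = pmd_offset(pud, addr);
	return pmd_huge(*pmd);		/* 2MB (or non-present hugetlb) entry */
}
#endif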

#ifdef CONFIG_HUGETLB_PAGE

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
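
/*
 * Worked example of the align_mask computation above, assuming 4KB base
 * pages and a 2MB hstate: PAGE_MASK == ~0xfffUL and huge_page_mask(h) ==
 * ~0x1fffffUL, so PAGE_MASK & ~huge_page_mask(h) == 0x1ff000.  With
 * align_offset == 0, vm_unmapped_area() returns an address with those
 * bits clear, i.e. a 2MB-aligned mapping.
 */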
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
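
/*
 * For reference, this hook is what ultimately services a userspace
 * request such as the following (a minimal, hypothetical sketch; it
 * assumes a free huge page is available and the kernel was built with
 * hugetlbfs support):
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 2 * 1024 * 1024,
 *		       PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * With addr == NULL and no MAP_FIXED, the bottom-up or top-down helper
 * above picks a suitably aligned range.
 */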
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
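
/*
 * Usage note: the parser above is wired to the kernel command line, so
 * booting with e.g. "hugepagesz=1G hugepages=4" registers the 1GB hstate
 * here (when GB pages are supported) and lets the generic hugetlb code
 * reserve four such pages.  memparse() accepts the K/M/G size suffixes.
 */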

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif /* CONFIG_X86_64 */