/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#if 0 /* This is just for testing */
struct page *
follow_huge_addr ( struct mm_struct * mm , unsigned long address , int write )
{
unsigned long start = address ;
int length = 1 ;
int nr ;
struct page * page ;
struct vm_area_struct * vma ;
vma = find_vma ( mm , addr ) ;
if ( ! vma | | ! is_vm_hugetlb_page ( vma ) )
return ERR_PTR ( - EINVAL ) ;
pte = huge_pte_offset ( mm , address ) ;
/* hugetlb should be locked, and hence, prefaulted */
WARN_ON ( ! pte | | pte_none ( * pte ) ) ;
page = & pte_page ( * pte ) [ vpfn % ( HPAGE_SIZE / PAGE_SIZE ) ] ;
2008-03-26 21:03:04 -07:00
WARN_ON ( ! PageHead ( page ) ) ;
2005-04-16 15:20:36 -07:00
return page ;
}
int pmd_huge ( pmd_t pmd )
{
return 0 ;
}
2008-07-23 21:27:50 -07:00
int pud_huge ( pud_t pud )
{
return 0 ;
}
2005-04-16 15:20:36 -07:00
struct page *
follow_huge_pmd ( struct mm_struct * mm , unsigned long address ,
pmd_t * pmd , int write )
{
return NULL ;
}
# else
struct page *
follow_huge_addr ( struct mm_struct * mm , unsigned long address , int write )
{
return ERR_PTR ( - EINVAL ) ;
}
int pmd_huge ( pmd_t pmd )
{
return ! ! ( pmd_val ( pmd ) & _PAGE_PSE ) ;
}
2008-07-23 21:27:50 -07:00
int pud_huge ( pud_t pud )
{
2008-07-23 21:27:50 -07:00
return ! ! ( pud_val ( pud ) & _PAGE_PSE ) ;
2008-07-23 21:27:50 -07:00
}
2005-04-16 15:20:36 -07:00
# endif
#ifdef CONFIG_HUGETLB_PAGE
/*
 * Find a free, huge-page-aligned region of @len bytes by searching
 * upward from the legacy mmap base toward TASK_SIZE.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	/* Require alignment to this hstate's huge page size. */
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
static unsigned long hugetlb_get_unmapped_area_topdown ( struct file * file ,
unsigned long addr0 , unsigned long len ,
unsigned long pgoff , unsigned long flags )
{
2008-07-23 21:27:50 -07:00
struct hstate * h = hstate_file ( file ) ;
2012-12-11 16:02:02 -08:00
struct vm_unmapped_area_info info ;
unsigned long addr ;
2005-04-16 15:20:36 -07:00
2012-12-11 16:02:02 -08:00
info . flags = VM_UNMAPPED_AREA_TOPDOWN ;
info . length = len ;
info . low_limit = PAGE_SIZE ;
info . high_limit = current - > mm - > mmap_base ;
info . align_mask = PAGE_MASK & ~ huge_page_mask ( h ) ;
info . align_offset = 0 ;
addr = vm_unmapped_area ( & info ) ;
2005-04-16 15:20:36 -07:00
/*
* A failed mmap ( ) very likely causes application failure ,
* so fall back to the bottom - up function here . This scenario
* can happen with large stack limits and large mmap ( )
* allocations .
*/
2012-12-11 16:02:02 -08:00
if ( addr & ~ PAGE_MASK ) {
VM_BUG_ON ( addr ! = - ENOMEM ) ;
info . flags = 0 ;
info . low_limit = TASK_UNMAPPED_BASE ;
info . high_limit = TASK_SIZE ;
addr = vm_unmapped_area ( & info ) ;
}
2005-04-16 15:20:36 -07:00
return addr ;
}
unsigned long
hugetlb_get_unmapped_area ( struct file * file , unsigned long addr ,
unsigned long len , unsigned long pgoff , unsigned long flags )
{
2008-07-23 21:27:50 -07:00
struct hstate * h = hstate_file ( file ) ;
2005-04-16 15:20:36 -07:00
struct mm_struct * mm = current - > mm ;
struct vm_area_struct * vma ;
2008-07-23 21:27:50 -07:00
if ( len & ~ huge_page_mask ( h ) )
2005-04-16 15:20:36 -07:00
return - EINVAL ;
if ( len > TASK_SIZE )
return - ENOMEM ;
2007-05-06 14:50:08 -07:00
if ( flags & MAP_FIXED ) {
2008-07-23 21:27:41 -07:00
if ( prepare_hugepage_range ( file , addr , len ) )
2007-05-06 14:50:08 -07:00
return - EINVAL ;
return addr ;
}
2005-04-16 15:20:36 -07:00
if ( addr ) {
2008-07-23 21:27:50 -07:00
addr = ALIGN ( addr , huge_page_size ( h ) ) ;
2005-04-16 15:20:36 -07:00
vma = find_vma ( mm , addr ) ;
if ( TASK_SIZE - len > = addr & &
( ! vma | | addr + len < = vma - > vm_start ) )
return addr ;
}
if ( mm - > get_unmapped_area = = arch_get_unmapped_area )
return hugetlb_get_unmapped_area_bottomup ( file , addr , len ,
pgoff , flags ) ;
else
return hugetlb_get_unmapped_area_topdown ( file , addr , len ,
pgoff , flags ) ;
}
2013-11-19 15:17:50 +02:00
# endif /* CONFIG_HUGETLB_PAGE */
2005-04-16 15:20:36 -07:00
2008-07-23 21:27:51 -07:00
#ifdef CONFIG_X86_64
/*
 * Parse the "hugepagesz=" boot parameter: register a PMD-sized hstate
 * unconditionally, or a PUD-sized one only when the CPU supports GB
 * pages; reject anything else. Returns 1 on success, 0 on bad input.
 */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
#endif