/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
2006-03-20 12:17:17 +03:00
/* Slightly simplified from the non-hugepage variant because by
* definition we don ' t have to worry about any page coloring stuff
*/
static unsigned long hugetlb_get_unmapped_area_bottomup ( struct file * filp ,
unsigned long addr ,
unsigned long len ,
unsigned long pgoff ,
unsigned long flags )
{
unsigned long task_size = TASK_SIZE ;
2012-12-12 04:02:25 +04:00
struct vm_unmapped_area_info info ;
2006-03-20 12:17:17 +03:00
if ( test_thread_flag ( TIF_32BIT ) )
task_size = STACK_TOP32 ;
2012-12-12 04:02:25 +04:00
info . flags = 0 ;
info . length = len ;
info . low_limit = TASK_UNMAPPED_BASE ;
info . high_limit = min ( task_size , VA_EXCLUDE_START ) ;
info . align_mask = PAGE_MASK & ~ HPAGE_MASK ;
info . align_offset = 0 ;
addr = vm_unmapped_area ( & info ) ;
if ( ( addr & ~ PAGE_MASK ) & & task_size > VA_EXCLUDE_END ) {
VM_BUG_ON ( addr ! = - ENOMEM ) ;
info . low_limit = VA_EXCLUDE_END ;
info . high_limit = task_size ;
addr = vm_unmapped_area ( & info ) ;
2006-03-20 12:17:17 +03:00
}
2012-12-12 04:02:25 +04:00
return addr ;
2006-03-20 12:17:17 +03:00
}
static unsigned long
hugetlb_get_unmapped_area_topdown ( struct file * filp , const unsigned long addr0 ,
const unsigned long len ,
const unsigned long pgoff ,
const unsigned long flags )
{
struct mm_struct * mm = current - > mm ;
unsigned long addr = addr0 ;
2012-12-12 04:02:25 +04:00
struct vm_unmapped_area_info info ;
2006-03-20 12:17:17 +03:00
/* This should only ever run for 32-bit processes. */
BUG_ON ( ! test_thread_flag ( TIF_32BIT ) ) ;
2012-12-12 04:02:25 +04:00
info . flags = VM_UNMAPPED_AREA_TOPDOWN ;
info . length = len ;
info . low_limit = PAGE_SIZE ;
info . high_limit = mm - > mmap_base ;
info . align_mask = PAGE_MASK & ~ HPAGE_MASK ;
info . align_offset = 0 ;
addr = vm_unmapped_area ( & info ) ;
2006-03-20 12:17:17 +03:00
/*
* A failed mmap ( ) very likely causes application failure ,
* so fall back to the bottom - up function here . This scenario
* can happen with large stack limits and large mmap ( )
* allocations .
*/
2012-12-12 04:02:25 +04:00
if ( addr & ~ PAGE_MASK ) {
VM_BUG_ON ( addr ! = - ENOMEM ) ;
info . flags = 0 ;
info . low_limit = TASK_UNMAPPED_BASE ;
info . high_limit = STACK_TOP32 ;
addr = vm_unmapped_area ( & info ) ;
}
2006-03-20 12:17:17 +03:00
return addr ;
}
unsigned long
hugetlb_get_unmapped_area ( struct file * file , unsigned long addr ,
unsigned long len , unsigned long pgoff , unsigned long flags )
{
struct mm_struct * mm = current - > mm ;
struct vm_area_struct * vma ;
unsigned long task_size = TASK_SIZE ;
if ( test_thread_flag ( TIF_32BIT ) )
task_size = STACK_TOP32 ;
if ( len & ~ HPAGE_MASK )
return - EINVAL ;
if ( len > task_size )
return - ENOMEM ;
2007-05-07 01:50:10 +04:00
if ( flags & MAP_FIXED ) {
2008-07-24 08:27:41 +04:00
if ( prepare_hugepage_range ( file , addr , len ) )
2007-05-07 01:50:10 +04:00
return - EINVAL ;
return addr ;
}
2006-03-20 12:17:17 +03:00
if ( addr ) {
addr = ALIGN ( addr , HPAGE_SIZE ) ;
vma = find_vma ( mm , addr ) ;
if ( task_size - len > = addr & &
( ! vma | | addr + len < = vma - > vm_start ) )
return addr ;
}
if ( mm - > get_unmapped_area = = arch_get_unmapped_area )
return hugetlb_get_unmapped_area_bottomup ( file , addr , len ,
pgoff , flags ) ;
else
return hugetlb_get_unmapped_area_topdown ( file , addr , len ,
pgoff , flags ) ;
}
2008-07-24 08:27:41 +04:00
pte_t * huge_pte_alloc ( struct mm_struct * mm ,
unsigned long addr , unsigned long sz )
2005-04-17 02:20:36 +04:00
{
pgd_t * pgd ;
pud_t * pud ;
pmd_t * pmd ;
pte_t * pte = NULL ;
2006-03-31 12:36:25 +04:00
/* We must align the address, because our caller will run
* set_huge_pte_at ( ) on whatever we return , which writes out
* all of the sub - ptes for the hugepage range . So we have
* to give it the first such sub - pte .
*/
addr & = HPAGE_MASK ;
2005-04-17 02:20:36 +04:00
pgd = pgd_offset ( mm , addr ) ;
2006-03-22 11:49:59 +03:00
pud = pud_alloc ( mm , pgd , addr ) ;
if ( pud ) {
pmd = pmd_alloc ( mm , pud , addr ) ;
if ( pmd )
2011-01-14 02:46:43 +03:00
pte = pte_alloc_map ( mm , NULL , pmd , addr ) ;
2005-04-17 02:20:36 +04:00
}
return pte ;
}
2005-06-22 04:14:44 +04:00
pte_t * huge_pte_offset ( struct mm_struct * mm , unsigned long addr )
2005-04-17 02:20:36 +04:00
{
pgd_t * pgd ;
pud_t * pud ;
pmd_t * pmd ;
pte_t * pte = NULL ;
2006-03-20 12:17:17 +03:00
addr & = HPAGE_MASK ;
2005-04-17 02:20:36 +04:00
pgd = pgd_offset ( mm , addr ) ;
2006-03-20 12:17:17 +03:00
if ( ! pgd_none ( * pgd ) ) {
2005-04-17 02:20:36 +04:00
pud = pud_offset ( pgd , addr ) ;
2006-03-20 12:17:17 +03:00
if ( ! pud_none ( * pud ) ) {
2005-04-17 02:20:36 +04:00
pmd = pmd_offset ( pud , addr ) ;
2006-03-20 12:17:17 +03:00
if ( ! pmd_none ( * pmd ) )
2005-04-17 02:20:36 +04:00
pte = pte_offset_map ( pmd , addr ) ;
}
}
return pte ;
}
2006-12-07 07:32:03 +03:00
int huge_pmd_unshare ( struct mm_struct * mm , unsigned long * addr , pte_t * ptep )
{
return 0 ;
}
2005-06-22 04:14:44 +04:00
void set_huge_pte_at ( struct mm_struct * mm , unsigned long addr ,
pte_t * ptep , pte_t entry )
2005-04-17 02:20:36 +04:00
{
2005-06-22 04:14:44 +04:00
int i ;
2006-03-22 11:49:59 +03:00
if ( ! pte_present ( * ptep ) & & pte_present ( entry ) )
mm - > context . huge_pte_count + + ;
2007-03-13 08:55:39 +03:00
addr & = HPAGE_MASK ;
2005-06-22 04:14:44 +04:00
for ( i = 0 ; i < ( 1 < < HUGETLB_PAGE_ORDER ) ; i + + ) {
set_pte_at ( mm , addr , ptep , entry ) ;
ptep + + ;
addr + = PAGE_SIZE ;
pte_val ( entry ) + = PAGE_SIZE ;
}
}
2005-04-17 02:20:36 +04:00
2005-06-22 04:14:44 +04:00
pte_t huge_ptep_get_and_clear ( struct mm_struct * mm , unsigned long addr ,
pte_t * ptep )
{
pte_t entry ;
int i ;
2005-04-17 02:20:36 +04:00
2005-06-22 04:14:44 +04:00
entry = * ptep ;
2006-03-22 11:49:59 +03:00
if ( pte_present ( entry ) )
mm - > context . huge_pte_count - - ;
2005-04-17 02:20:36 +04:00
2007-03-13 08:55:39 +03:00
addr & = HPAGE_MASK ;
2005-04-17 02:20:36 +04:00
for ( i = 0 ; i < ( 1 < < HUGETLB_PAGE_ORDER ) ; i + + ) {
2005-06-22 04:14:44 +04:00
pte_clear ( mm , addr , ptep ) ;
2005-04-17 02:20:36 +04:00
addr + = PAGE_SIZE ;
2005-06-22 04:14:44 +04:00
ptep + + ;
2005-04-17 02:20:36 +04:00
}
2005-06-22 04:14:44 +04:00
return entry ;
2005-04-17 02:20:36 +04:00
}
struct page * follow_huge_addr ( struct mm_struct * mm ,
unsigned long address , int write )
{
return ERR_PTR ( - EINVAL ) ;
}
int pmd_huge ( pmd_t pmd )
{
return 0 ;
}
2008-07-24 08:27:50 +04:00
int pud_huge ( pud_t pud )
{
return 0 ;
}
2013-09-12 01:22:11 +04:00
int pmd_huge_support ( void )
{
return 0 ;
}
2005-04-17 02:20:36 +04:00
struct page * follow_huge_pmd ( struct mm_struct * mm , unsigned long address ,
pmd_t * pmd , int write )
{
return NULL ;
}