/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
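
/* Top-down variant: search below mm->mmap_base first.  Only 32-bit
 * tasks use this layout here, and if the search fails we fall back to
 * a bottom-up search over the full 32-bit range.
 */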
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~HPAGE_MASK;
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
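
/* Main entry point: validate the request (a hugepage-multiple length
 * that fits the task's address space), honor MAP_FIXED and any hint
 * address, then dispatch to the bottom-up or top-down helper depending
 * on the mm's layout.
 */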
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
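
/* Allocate the page-table levels covering a hugepage and return the
 * first sub-pte of the range, or NULL if an intermediate level cannot
 * be allocated.
 */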
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
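
/* Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables and return the first sub-pte of the hugepage, or NULL if any
 * level is not populated.
 */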
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}
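
/* Install a hugepage mapping by writing all 1 << HUGETLB_PAGE_ORDER
 * sub-ptes, advancing the physical address by PAGE_SIZE for each one.
 * The range spans two REAL_HPAGE_SIZE halves, and each half gets its
 * own (possibly batched) TLB flush against the pte it replaced.
 */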
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;
	pte_t orig[2];
	unsigned long nptes;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;

	nptes = 1 << HUGETLB_PAGE_ORDER;
	orig[0] = *ptep;
	orig[1] = *(ptep + nptes / 2);
	for (i = 0; i < nptes; i++) {
		*ptep = entry;
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	addr -= REAL_HPAGE_SIZE;
	ptep -= nptes / 2;
	maybe_tlb_batch_add(mm, addr, ptep, orig[1], 0);
	addr -= REAL_HPAGE_SIZE;
	ptep -= nptes / 2;
	maybe_tlb_batch_add(mm, addr, ptep, orig[0], 0);
}
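
/* Tear down a hugepage mapping: clear every sub-pte, queue a TLB flush
 * at each REAL_HPAGE_SIZE boundary, and return the original first
 * sub-pte to the caller.
 */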
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;
	unsigned long nptes;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;
	nptes = 1 << HUGETLB_PAGE_ORDER;
	for (i = 0; i < nptes; i++) {
		*ptep = __pte(0UL);
		addr += PAGE_SIZE;
		ptep++;
	}

	/* Issue TLB flush at REAL_HPAGE_SIZE boundaries */
	addr -= REAL_HPAGE_SIZE;
	ptep -= nptes / 2;
	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);
	addr -= REAL_HPAGE_SIZE;
	ptep -= nptes / 2;
	maybe_tlb_batch_add(mm, addr, ptep, entry, 0);

	return entry;
}
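
/* Hugepages are represented here as runs of ordinary ptes rather than
 * huge pmds or puds, so these level checks always report false.
 */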
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}