/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff.
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

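/* Bottom-up search: scan upward from the cached free-area hint (or
 * TASK_UNMAPPED_BASE) for a HPAGE_SIZE aligned hole of "len" bytes,
 * hopping over the unusable VA range between VA_EXCLUDE_START and
 * VA_EXCLUDE_END.
 */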
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

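/* Top-down search: used for 32-bit tasks whose mmap layout grows down
 * from mm->mmap_base.  Walks downward looking for a HPAGE_SIZE aligned
 * hole and falls back to the bottom-up allocator if that fails.
 */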
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr - len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr - len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base - len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

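/* Main entry point: validate the request, honor MAP_FIXED and explicit
 * address hints, then dispatch to the bottom-up or top-down helper
 * depending on which mmap layout this mm uses.
 */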
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
							  pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
							 pgoff, flags);
}

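/* Allocate the page table levels covering a huge page and return a
 * pointer to the first of its sub-PTEs.
 */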
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}

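/* Look up the first sub-PTE of a huge page without allocating any
 * page table levels; returns NULL if the mapping is not populated.
 */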
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

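/* sparc64 does not share huge page PMDs, so there is never anything
 * to unshare.
 */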
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

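/* Install a huge page mapping by writing every sub-PTE that backs the
 * HPAGE_SIZE region, bumping the per-mm huge mapping count when a new
 * mapping becomes present.
 */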
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

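/* Tear down a huge page mapping: clear every sub-PTE in the region and
 * return the original first PTE value to the caller.
 */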
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

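/* Huge pages on sparc64 are built out of ordinary PTEs, so generic
 * page table walkers never see a huge PMD; these hooks remain stubs.
 */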
struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

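/* Cross-call target: reload the secondary context register on CPUs
 * currently running this address space.
 */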
static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

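/* First-touch hook: make sure this mm has a huge page TSB and, on
 * UltraSPARC-III+ (cheetah_plus) and later, program the context
 * register so the second half of the D-TLB matches huge pages.
 */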
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}