/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
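
/*
 * Illustration of the base offset calculation mentioned above. This is a
 * sketch only, not part of this file: the real definitions live in
 * include/asm-generic/memory_model.h and the arch headers, and
 * VMEMMAP_START is an arch-chosen constant. With a virtually contiguous
 * memmap, the conversions reduce to pointer arithmetic, roughly:
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	((unsigned long)((page) - vmemmap))
 *
 * Neither direction touches memory; both are plain offset calculations.
 */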

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO, get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __alloc_bootmem_node(NODE_DATA(node), size, size,
				__pa(MAX_DMA_ADDRESS));
}
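
/*
 * Warn if the page backing this piece of the virtual memory map does not
 * come from the node whose page structs it holds.
 */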
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (actual_node != node)
		printk(KERN_WARNING "[%lx-%lx] potential offnode "
			"page_structs\n", start, end - 1);
}
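
/*
 * The helpers below instantiate the vmemmap page tables one level at a
 * time, allocating any missing level (and, at the pte level, the page
 * that backs the map itself) from the target node. Each returns NULL if
 * the backing allocation fails.
 */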
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
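
/*
 * Populate the virtual memory map covering the 'size' page structs that
 * start at start_page, using base page size mappings and instantiating
 * every page table level as needed.
 */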
int __meminit vmemmap_populate_basepages(struct page *start_page,
						unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
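
/*
 * Instantiate the mem_map for one sparsemem section by populating the
 * virtual memory map over its pfn range via the arch's vmemmap_populate().
 */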
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);

	if (error)
		return NULL;

	return map;
}
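
/*
 * Example only (this lives in arch code, not in this file): an
 * architecture that maps the memmap with base pages and needs no special
 * TLB handling can implement the required vmemmap_populate() as a thin
 * wrapper around the generic walker above, roughly:
 *
 *	int __meminit vmemmap_populate(struct page *start_page,
 *					unsigned long size, int node)
 *	{
 *		return vmemmap_populate_basepages(start_page, size, node);
 *	}
 */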