/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007  Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 *  Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/cache.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pgd_t swapper_pg_dir[PTRS_PER_PGD];

#ifdef CONFIG_SUPERH32
/*
 * Handle trivial transitions between cached and uncached
 * segments, making use of the 1:1 mapping relationship in
 * 512MB lowmem.
 *
 * This is the offset of the uncached section from its cached alias.
 * The default value is only valid in 29-bit mode; in 32-bit mode it
 * is overridden in pmb_init.
 */
unsigned long cached_to_uncached = P2SEG - P1SEG;
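/*
 * Example (29-bit mode): the P1SEG (cached) address 0x8c000000 and the
 * P2SEG (uncached) address 0xac000000 refer to the same physical
 * memory, so adding cached_to_uncached to a P1 virtual address yields
 * its uncached alias.
 */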
#endif

#ifdef CONFIG_MMU
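/*
 * Install a single kernel PTE mapping 'addr' to the physical address
 * 'phys' with protection 'prot', allocating any missing intermediate
 * page table levels along the way, then flush the now-stale TLB entry
 * for 'addr'.
 */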
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	local_flush_tlb_one(get_asid(), addr);
}

/*
 * As a performance optimization, other platforms preserve the fixmap
 * mapping across a context switch. We don't presently do this, but it
 * could be done in a similar fashion to the wired TLB interface that
 * sh64 uses (by way of the memory mapped UTLB configuration) -- this
 * unfortunately forces us to give up a TLB entry for each mapping we
 * want to preserve. While this may be viable for a small number of
 * fixmaps, it's not particularly useful for everything and needs to be
 * carefully evaluated. (ie, we may want this for the vsyscall page).
 *
 * XXX: Perhaps add a _PAGE_WIRED flag or something similar that we can
 * pass in at __set_fixmap() time to determine the appropriate behavior
 * to follow.
 *
 *					 -- PFM.
 */
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}
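
/*
 * Pre-allocate pte tables for the kernel virtual range [start, end) so
 * that fixmap entries can later be installed via __set_fixmap() without
 * further memory allocation; the individual ptes themselves are filled
 * in on demand.
 */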
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int pgd_idx;
	unsigned long vaddr;

	vaddr = start & PMD_MASK;
	end = (end + PMD_SIZE - 1) & PMD_MASK;
	pgd_idx = pgd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, 0);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, 0);

		if (!pmd_present(*pmd)) {
			pte_t *pte_table;

			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
			pmd_populate_kernel(&init_mm, pmd, pte_table);
		}

		vaddr += PMD_SIZE;
	}
}
#endif	/* CONFIG_MMU */

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr;
	int nid;

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	page_table_range_init(vaddr, 0, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);

	/* Set up the uncached fixmap */
	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
}

static struct kcore_list kcore_mem, kcore_vmalloc;

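/*
 * Release each node's bootmem pages to the page allocator, compute
 * high_memory from the topmost spanned page, and report the final
 * memory statistics.
 */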
void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	codesize = (unsigned long)&_etext - (unsigned long)&_text;
	datasize = (unsigned long)&_edata - (unsigned long)&_etext;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
		num_physpages << (PAGE_SHIFT - 10),
		codesize >> 10,
		datasize >> 10,
		initsize >> 10);

	p3_cache_init();

	/* Initialize the vDSO */
	vsyscall_init();
}
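
/*
 * Free the pages spanning the .init sections back to the page allocator
 * once boot has finished with them.
 */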
void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

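/*
 * When the kernel stack is smaller than a page (THREAD_SHIFT <
 * PAGE_SHIFT), a full page per thread_info would be wasteful, so
 * thread_info allocations are instead backed by a dedicated slab cache
 * sized and aligned to THREAD_SIZE.
 */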
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */

#ifdef CONFIG_MEMORY_HOTPLUG
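/*
 * Memory hotplug entry point: hand a newly added physical range over to
 * the VM. Everything lands in ZONE_NORMAL, the only zone in use here.
 */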
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */