/*
 *  arch/s390/mm/init.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));

char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
EXPORT_SYMBOL(empty_zero_page);

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long) * 2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
	       nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10);
	printk("Write protected kernel read-only data: %#lx - %#lx\n",
	       (unsigned long) &_stext,
	       PFN_ALIGN((unsigned long) &_eshared) - 1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
#endif

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int rc;

	pgdat = NODE_DATA(nid);
	zone = pgdat->node_zones + ZONE_MOVABLE;
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;
	rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */