/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
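
/*
 * Overview: this file manages the kernel 1:1 (identity) mapping of
 * physical memory and the virtual mem_map array backing struct pages,
 * and keeps a list of memory segments added or removed at runtime.
 */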

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

/*
 * __ref: this may call the __init bootmem allocator, which is fine as
 * long as the slab allocator is not yet available.
 */
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *) __get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	/* A region-third table has 2048 entries, i.e. four pages. */
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	/* A segment table likewise has 2048 entries, i.e. four pages. */
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(void)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	/*
	 * On machines with large-page support the empty entries also get
	 * the change-recording override bit (_PAGE_CO), matching the
	 * _SEGMENT_ENTRY_CO used for large pages in vmem_add_mem().
	 */
	if (MACHINE_HAS_HPAGE)
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY | _PAGE_CO,
			    PTRS_PER_PTE * sizeof(pte_t));
	else
		clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
			    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
		/*
		 * Use a large page mapping if the machine supports it,
		 * the address is large-page aligned and a whole large
		 * page still fits into the range. The first large page
		 * is deliberately kept mapped with 4K ptes.
		 */
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE |
					_SEGMENT_ENTRY_CO;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}
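
/*
 * Worked example for vmem_add_mem() (HPAGE_SIZE is 1 MB on s390):
 * adding the range [0x0, 0x300000) on a machine with large-page
 * support maps the first megabyte with 256 4K ptes, because the
 * address >= HPAGE_SIZE test excludes it, and the remaining two
 * megabytes with two large segment entries.
 */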

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;
		/* A huge pmd maps a whole segment; clear it in one step. */
		if (pmd_huge(*pm_dir)) {
			pmd_clear_kernel(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate_kernel(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}
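
/*
 * Sizing example for vmemmap_populate(): adding 256 MB of memory means
 * 65536 struct pages; with an assumed struct page size of 64 bytes
 * that is 4 MB of virtual mem_map, backed by 1024 order-0 pages
 * allocated above.
 */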

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
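
/*
 * Overlap example (hypothetical addresses): with [0x10000000, +16 MB)
 * already on the list, a new segment starting at 0x10800000 fails with
 * -ENOSPC, since it neither starts at or after the end of the existing
 * segment nor ends at or before its start.
 */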

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
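
/*
 * Illustrative use of the mapping interface (hypothetical caller and
 * values): a memory hotplug handler that wants to make a standby
 * storage increment usable could do
 *
 *	if (vmem_add_mapping(addr, size))
 *		goto failed;
 *
 * and later undo it with vmem_remove_mapping(addr, size).
 */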

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	spin_lock_init(&init_mm.context.list_lock);
	INIT_LIST_HEAD(&init_mm.context.crst_list);
	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
	init_mm.context.noexec = 0;
	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	/*
	 * Map each memory chunk, with the range from _stext to _eshared
	 * mapped read-only; chunks that straddle a read-only boundary
	 * are split accordingly.
	 */
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}
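
/*
 * Branch example for vmem_map_init(): a memory chunk that contains the
 * whole read-only region ends up as three vmem_add_mem() calls, i.e.
 * read-write up to ro_start, read-only for [ro_start, ro_end) and
 * read-write for the remainder.
 */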

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);