/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>

#include "mm.h"
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Register the exception vector page.
	 * On some platforms DRAM starts at the exception vector address;
	 * if that page were handed out by alloc_page(), the result would
	 * be page zero, which is not NULL but is still "0" and breaks
	 * callers.
	 */
	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
}
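/*
 * With no MMU there is no lowmem/highmem split to check; just record
 * the end of the last registered memory bank as high_memory.
 */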
void __init sanity_check_meminfo(void)
{
	phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
	high_memory = __va(end - 1) + 1;
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	bootmem_init();
}
/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}
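/*
 * With no MMU every access to a page goes through its identity mapping,
 * so there are no cache aliases to chase: flushing the kernel view of
 * the page is enough.
 */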
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);
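/*
 * Copy data into a page that is mapped into user space (e.g. by ptrace
 * or breakpoint insertion).  After the copy, make the instruction and
 * data caches coherent over the written range if the mapping is
 * executable.
 */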
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}
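/*
 * With no MMU, "remapping" I/O is just a matter of handing back the
 * physical address: device registers are accessed through their real
 * addresses.  Only PFNs below 4GiB can be expressed in a 32-bit
 * pointer, hence the range check.
 */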
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *)(offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
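/*
 * The _caller variants take a caller address, presumably for mapping
 * bookkeeping on MMU builds; with no mapping state to track here they
 * simply forward to the plain functions and ignore it.
 */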
void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
				       size_t size, unsigned int mtype, void *caller)
{
	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
			    unsigned int mtype)
{
	return (void __iomem *)phys_addr;
}
EXPORT_SYMBOL(__arm_ioremap);
void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return __arm_ioremap(phys_addr, size, mtype);
}
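/*
 * Nothing was mapped by the ioremap functions above, so there is
 * nothing to tear down here.
 */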
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);