/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
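
/*
 * Walk the kernel page tables for @vaddr down all four levels
 * (pgd -> pud -> pmd -> pte) and return the PTE that maps it.
 */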
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif
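
/*
 * On 32-bit, RAM is assumed contiguous from pfn 0, so a simple bound
 * check against max_pfn suffices; on 64-bit, memory may be sparse, so
 * the LMB region list is scanned instead.
 */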
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
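
/*
 * Illustrative use (a sketch, not code from this file): a driver mmap
 * hook would typically apply the returned protection before remapping:
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 *
 * so that mappings of non-RAM addresses come out non-cached.
 */
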
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
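
/*
 * Hot-add path: extend the kernel linear mapping over the new range
 * first (create_section_mapping), then hand the page range to the
 * core VM via __add_pages().
 */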
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and call back for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
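
/*
 * Illustrative caller (a sketch, not code from this file): counting the
 * RAM pages in a pfn range while skipping holes:
 *
 *	static int count_ram(unsigned long pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram);
 */
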
/*
 * Initialize the bootmem system and give it all of the memory we have
 * available.  If we are using highmem, we only put the lowmem into the
 * bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
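	/*
	 * Worked example (illustrative): with 512MB of lowmem and 4KB
	 * pages, total_pages = 131072, the bitmap needs 131072 / 8 =
	 * 16KB, and bootmem_bootmap_pages() returns 4 pages (plus the
	 * alignment slack noted above).
	 */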
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);
#endif

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
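
/*
 * Holes between LMB memory regions have no RAM behind them.  Registering
 * each hole as a nosave region keeps hibernation from trying to save or
 * restore those pages.  Illustrative example: regions [0, 0x1000) and
 * [0x2000, 0x3000) (in pfns) yield a nosave hole [0x1000, 0x2000).
 */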
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	phys_addr_t top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];
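
	/*
	 * On 32-bit, pre-map the whole fixmap region up front so that the
	 * page tables backing it exist early; later users such as
	 * kmap_atomic() then only have to rewrite PTE contents.
	 */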
#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
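
	/*
	 * Bootmem only managed lowmem, so highmem pages are released to
	 * the buddy allocator by hand: each non-reserved highmem pfn gets
	 * its refcount initialised and is freed individually.
	 */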
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
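
/*
 * Three strategies below: compound (huge) pages are flushed page by
 * page; Book E kmaps the page in case it lives in highmem; 8xx and
 * 64-bit can use the linear mapping directly; everything else flushes
 * by physical address.
 */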
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
		__flush_dcache_icache(start);
		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is
	 * not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
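
	/*
	 * Exception vectors: 0x300 is a data storage interrupt, 0x400 an
	 * instruction storage interrupt.  Only those two come from page
	 * faults; an instruction fault additionally needs execute
	 * permission in the HPTE.
	 */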
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}