/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif
int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;
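
/*
 * page_is_ram() tells whether a page frame is backed by real memory.
 * On 32-bit we can simply compare against high_memory; on 64-bit we
 * have to walk the LMB region list, since memory may be discontiguous.
 */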
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
			(paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
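
/*
 * Mappers of physical memory (/dev/mem and friends) come through here
 * to pick the page protection for a range: the platform gets first say
 * via ppc_md.phys_mem_access_prot(), and anything that is not RAM is
 * mapped guarded and non-cacheable.
 */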
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
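
/*
 * online_page() hands a newly hot-added page to the buddy allocator:
 * clear the reserved bit, reset the refcount, free the page, then
 * update the global page accounting.
 */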
void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
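
/*
 * arch_add_memory() wires a hot-added range into the kernel: map it
 * into the linear mapping (create_section_mapping() wants virtual
 * addresses, hence the __va()), then hand the page range to the
 * generic hotplug code.
 */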
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
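
/*
 * remove_memory() offlines the pages in the range; offline_pages()
 * gets a 120*HZ timeout to migrate or free everything in it.  The
 * architecture-specific teardown is still to come (see the comment at
 * the "out" label).
 */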
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
	if (ret)
		goto out;
	/* Arch-specific calls go here - next patch */
out:
	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  On PPC64, since this range comes from /sysfs, the range
 * is guaranteed to be valid, non-overlapping and cannot contain any
 * holes.  By the time we get here (memory add or remove), /proc/device-tree
 * is updated and correct.  The only reason we would need to check against
 * the device tree is if user-land were allowed to specify a memory range
 * through a system call/ioctl etc. instead of doing offline/online through
 * /sysfs.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	return (*func)(start_pfn, nr_pages, arg);
}
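
/*
 * Illustrative caller (a sketch, not part of this file): a walker that
 * simply totals up the pages it is shown.
 *
 *	static int count_pages(unsigned long start_pfn,
 *			       unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */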
#endif /* CONFIG_MEMORY_HOTPLUG */
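
/*
 * show_mem() prints a summary of page usage: it walks every online
 * node under the resize lock and classifies each valid page as
 * highmem, reserved, swap-cached or shared.
 */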
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}
	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < total_lowmem)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < total_lowmem) {
			unsigned long adjusted_size = total_lowmem -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
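
/*
 * Hibernation must not try to save or restore page frames that don't
 * exist, so each gap between two adjacent LMB memory regions gets
 * registered as a nosave region.
 */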
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN),
			 KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
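	/*
	 * Size the zones: everything the kernel can map directly lives in
	 * ZONE_DMA; with CONFIG_HIGHMEM the pages above total_lowmem go
	 * to ZONE_HIGHMEM instead.
	 */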
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
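
/*
 * mem_init() releases bootmem to the buddy allocator, counts the
 * reserved pages, frees highmem (if any) and prints the boot-time
 * memory summary.
 */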
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}
	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
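
	/*
	 * With CONFIG_HIGHMEM the pages above total_lowmem were never
	 * given to bootmem, so hand them to the buddy allocator directly
	 * here, skipping any frames LMB has reserved.
	 */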
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */
	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
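
/*
 * flush_dcache_icache_page() pushes a page's data out of the d-cache
 * and invalidates the corresponding i-cache lines.  Book-E parts go
 * through a temporary kernel mapping (kmap_atomic); 8xx and 64-bit
 * can use page_address() directly since highmem is not an issue
 * there; other 32-bit hash MMUs flush by physical address.
 */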
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question. To workaround that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		/* The _PAGE_USER test should really be _PAGE_EXEC, but
		 * older glibc versions execute some code from no-exec
		 * pages, which for now we are supporting.  If exec-only
		 * pages are ever implemented, this will have to change.
		 */
		if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
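
	/* Vector 0x400 is an instruction storage (execute) fault and
	 * 0x300 a data storage fault; anything else did not come from a
	 * memory access, so skip the preload entirely. */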
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}