2005-04-16 15:20:36 -07:00
# include <linux/sched.h>
# include <linux/kernel.h>
# include <linux/errno.h>
# include <linux/mm.h>
2007-10-17 18:04:34 +02:00
# include <linux/nmi.h>
2005-04-16 15:20:36 -07:00
# include <linux/swap.h>
# include <linux/smp.h>
# include <linux/highmem.h>
# include <linux/slab.h>
# include <linux/pagemap.h>
# include <linux/spinlock.h>
2006-09-25 23:32:25 -07:00
# include <linux/module.h>
2007-05-12 11:15:24 -07:00
# include <linux/quicklist.h>
2005-04-16 15:20:36 -07:00
# include <asm/system.h>
# include <asm/pgtable.h>
# include <asm/pgalloc.h>
# include <asm/fixmap.h>
# include <asm/e820.h>
# include <asm/tlb.h>
# include <asm/tlbflush.h>
void show_mem ( void )
{
int total = 0 , reserved = 0 ;
int shared = 0 , cached = 0 ;
int highmem = 0 ;
struct page * page ;
pg_data_t * pgdat ;
unsigned long i ;
2005-10-29 18:16:52 -07:00
unsigned long flags ;
2005-04-16 15:20:36 -07:00
2005-06-25 14:59:24 -07:00
printk ( KERN_INFO " Mem-info: \n " ) ;
2005-04-16 15:20:36 -07:00
show_free_areas ( ) ;
2006-03-27 01:15:59 -08:00
for_each_online_pgdat ( pgdat ) {
2005-10-29 18:16:52 -07:00
pgdat_resize_lock ( pgdat , & flags ) ;
2005-04-16 15:20:36 -07:00
for ( i = 0 ; i < pgdat - > node_spanned_pages ; + + i ) {
2007-10-17 18:04:34 +02:00
if ( unlikely ( i % MAX_ORDER_NR_PAGES = = 0 ) )
touch_nmi_watchdog ( ) ;
[PATCH] remove non-DISCONTIG use of pgdat->node_mem_map
This patch effectively eliminates direct use of pgdat->node_mem_map outside
of the DISCONTIG code. On a flat memory system, these fields aren't
currently used, neither are they on a sparsemem system.
There was also a node_mem_map(nid) macro on many architectures. Its use
along with the use of ->node_mem_map itself was not consistent. It has
been removed in favor of two new, more explicit, arch-independent macros:
pgdat_page_nr(pgdat, pagenr)
nid_page_nr(nid, pagenr)
I called them "pgdat" and "nid" because we overload the term "node" to mean
"NUMA node", "DISCONTIG node" or "pg_data_t" in very confusing ways. I
believe the newer names are much clearer.
These macros can be overridden in the sparsemem case with a theoretically
slower operation using node_start_pfn and pfn_to_page(), instead. We could
make this the only behavior if people want, but I don't want to change too
much at once. One thing at a time.
This patch removes more code than it adds.
Compile tested on alpha, alpha discontig, arm, arm-discontig, i386, i386
generic, NUMAQ, Summit, ppc64, ppc64 discontig, and x86_64. Full list
here: http://sr71.net/patches/2.6.12/2.6.12-rc1-mhp2/configs/
Boot tested on NUMAQ, x86 SMP and ppc64 power4/5 LPARs.
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Martin J. Bligh <mbligh@aracnet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-23 00:07:37 -07:00
page = pgdat_page_nr ( pgdat , i ) ;
2005-04-16 15:20:36 -07:00
total + + ;
if ( PageHighMem ( page ) )
highmem + + ;
if ( PageReserved ( page ) )
reserved + + ;
else if ( PageSwapCache ( page ) )
cached + + ;
else if ( page_count ( page ) )
shared + = page_count ( page ) - 1 ;
}
2005-10-29 18:16:52 -07:00
pgdat_resize_unlock ( pgdat , & flags ) ;
2005-04-16 15:20:36 -07:00
}
2005-06-25 14:59:24 -07:00
printk ( KERN_INFO " %d pages of RAM \n " , total ) ;
printk ( KERN_INFO " %d pages of HIGHMEM \n " , highmem ) ;
printk ( KERN_INFO " %d reserved pages \n " , reserved ) ;
printk ( KERN_INFO " %d pages shared \n " , shared ) ;
printk ( KERN_INFO " %d pages swap cached \n " , cached ) ;
2005-06-23 00:08:08 -07:00
2006-06-30 01:55:39 -07:00
printk ( KERN_INFO " %lu pages dirty \n " , global_page_state ( NR_FILE_DIRTY ) ) ;
2006-06-30 01:55:40 -07:00
printk ( KERN_INFO " %lu pages writeback \n " ,
global_page_state ( NR_WRITEBACK ) ) ;
2006-06-30 01:55:34 -07:00
printk ( KERN_INFO " %lu pages mapped \n " , global_page_state ( NR_FILE_MAPPED ) ) ;
2006-09-25 23:31:51 -07:00
printk ( KERN_INFO " %lu pages slab \n " ,
global_page_state ( NR_SLAB_RECLAIMABLE ) +
global_page_state ( NR_SLAB_UNRECLAIMABLE ) ) ;
2006-06-30 01:55:38 -07:00
printk ( KERN_INFO " %lu pages pagetables \n " ,
global_page_state ( NR_PAGETABLE ) ) ;
2005-04-16 15:20:36 -07:00
}
/*
* Associate a virtual page frame with a given physical page frame
* and protection flags for that frame .
*/
2008-06-17 11:41:59 -07:00
void set_pte_vaddr ( unsigned long vaddr , pte_t pteval )
2005-04-16 15:20:36 -07:00
{
pgd_t * pgd ;
pud_t * pud ;
pmd_t * pmd ;
pte_t * pte ;
pgd = swapper_pg_dir + pgd_index ( vaddr ) ;
if ( pgd_none ( * pgd ) ) {
BUG ( ) ;
return ;
}
pud = pud_offset ( pgd , vaddr ) ;
if ( pud_none ( * pud ) ) {
BUG ( ) ;
return ;
}
pmd = pmd_offset ( pud , vaddr ) ;
if ( pmd_none ( * pmd ) ) {
BUG ( ) ;
return ;
}
pte = pte_offset_kernel ( pmd , vaddr ) ;
2008-06-17 11:41:59 -07:00
if ( pte_val ( pteval ) )
set_pte_present ( & init_mm , vaddr , pte , pteval ) ;
2006-12-07 02:14:09 +01:00
else
pte_clear ( & init_mm , vaddr , pte ) ;
2005-04-16 15:20:36 -07:00
/*
* It ' s enough to flush this one mapping .
* ( PGE mappings get flushed as well )
*/
__flush_tlb_one ( vaddr ) ;
}
/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame.  pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated.  Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/* Misuse is warned about rather than BUG()ing — callers may recover. */
	if (vaddr & (PMD_SIZE - 1)) {
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE - 1)) {
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}

	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
2006-09-25 23:32:25 -07:00
static int fixmaps ;
unsigned long __FIXADDR_TOP = 0xfffff000 ;
EXPORT_SYMBOL ( __FIXADDR_TOP ) ;
/**
* reserve_top_address - reserves a hole in the top of kernel address space
* @ reserve - size of hole to reserve
*
* Can be used to relocate the fixmap area and poke a hole in the top
* of kernel address space to make room for a hypervisor .
*/
void reserve_top_address ( unsigned long reserve )
{
2008-06-17 11:41:54 -07:00
BUG_ON ( fixmaps_set > 0 ) ;
2007-02-13 13:26:21 +01:00
printk ( KERN_INFO " Reserving virtual address space above 0x%08x \n " ,
( int ) - reserve ) ;
2006-09-25 23:32:25 -07:00
__FIXADDR_TOP = - reserve - PAGE_SIZE ;
__VMALLOC_RESERVE + = reserve ;
2005-04-16 15:20:36 -07:00
}