[SPARC64]: Fix 32-bit truncation which broke sparsemem.

The page->flags manipulations done by the D-cache dirty
state tracking was broken because the constants were not
marked with "UL" to make them 64-bit, which means we were
clobbering the upper 32-bits of page->flags all the time.

This doesn't jibe well with sparsemem, which stores the
section and indexing information in the top 32-bits of
page->flags.

This is yet another sparc64 bug which has been with us
forever.

While we're here, tidy up some things in bootmem_init()
and paging_init():

1) Pass min_low_pfn to init_bootmem_node(), it's identical
   to (phys_base >> PAGE_SHIFT) but we should be consistent
   with the variable names we print in CONFIG_BOOTMEM_DEBUG

2) max_mapnr, although no longer used, was being set
   inaccurately, we shouldn't subtract pfn_base any more.

3) All the games with phys_base in the zones_*[] arrays
   we pass to free_area_init_node() are no longer necessary.

Thanks to Josh Grebe and Fabbione for the bug reports
and testing.  Fix also verified locally on an SB2500
which had a memory layout that triggered the same problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2006-03-08 15:57:03 -08:00
parent d1112018b4
commit 17b0e199a1

View File

@ -205,8 +205,8 @@ inline void flush_dcache_page_impl(struct page *page)
} }
#define PG_dcache_dirty PG_arch_1 #define PG_dcache_dirty PG_arch_1
#define PG_dcache_cpu_shift 24 #define PG_dcache_cpu_shift 24UL
#define PG_dcache_cpu_mask (256 - 1) #define PG_dcache_cpu_mask (256UL - 1UL)
#if NR_CPUS > 256 #if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
@ -901,8 +901,7 @@ static unsigned long __init bootmem_init(unsigned long *pages_avail,
min_low_pfn, bootmap_pfn, max_low_pfn); min_low_pfn, bootmap_pfn, max_low_pfn);
#endif #endif
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
(phys_base >> PAGE_SHIFT), min_low_pfn, end_pfn);
end_pfn);
/* Now register the available physical memory with the /* Now register the available physical memory with the
* allocator. * allocator.
@ -1311,25 +1310,24 @@ void __init paging_init(void)
pages_avail = 0; pages_avail = 0;
last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base); last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);
max_mapnr = last_valid_pfn - (phys_base >> PAGE_SHIFT); max_mapnr = last_valid_pfn;
kernel_physical_mapping_init(); kernel_physical_mapping_init();
{ {
unsigned long zones_size[MAX_NR_ZONES]; unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES]; unsigned long zholes_size[MAX_NR_ZONES];
unsigned long npages;
int znum; int znum;
for (znum = 0; znum < MAX_NR_ZONES; znum++) for (znum = 0; znum < MAX_NR_ZONES; znum++)
zones_size[znum] = zholes_size[znum] = 0; zones_size[znum] = zholes_size[znum] = 0;
npages = end_pfn - (phys_base >> PAGE_SHIFT); zones_size[ZONE_DMA] = end_pfn;
zones_size[ZONE_DMA] = npages; zholes_size[ZONE_DMA] = end_pfn - pages_avail;
zholes_size[ZONE_DMA] = npages - pages_avail;
free_area_init_node(0, &contig_page_data, zones_size, free_area_init_node(0, &contig_page_data, zones_size,
phys_base >> PAGE_SHIFT, zholes_size); __pa(PAGE_OFFSET) >> PAGE_SHIFT,
zholes_size);
} }
device_scan(); device_scan();