Revert "mm: fix initialization of struct page for holes in memory layout"
This reverts commit d3921cb8be29ce5668c64e23ffdaeec5f8c69399.

Chris Wilson reports that it causes boot problems:

  "We have half a dozen or so different machines in CI that are silently
   failing to boot, that we believe is bisected to this patch"

and the CI team confirmed that a revert fixed the issues.

The cause is unknown for now, so let's revert it.

Link: https://lore.kernel.org/lkml/161160687463.28991.354987542182281928@build.alporthouse.com/
Reported-and-tested-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 377bf660d0
parent 13391c60da
mm/page_alloc.c
@@ -7080,26 +7080,23 @@ void __init free_area_init_memoryless_node(int nid)
 /*
  * Initialize all valid struct pages in the range [spfn, epfn) and mark them
  * PageReserved(). Return the number of struct pages that were initialized.
  */
-static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
-					 int zone, int nid)
+static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
 {
-	unsigned long pfn, zone_spfn, zone_epfn;
+	unsigned long pfn;
 	u64 pgcnt = 0;
 
-	zone_spfn = arch_zone_lowest_possible_pfn[zone];
-	zone_epfn = arch_zone_highest_possible_pfn[zone];
-
-	spfn = clamp(spfn, zone_spfn, zone_epfn);
-	epfn = clamp(epfn, zone_spfn, zone_epfn);
-
 	for (pfn = spfn; pfn < epfn; pfn++) {
 		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
 			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
 				+ pageblock_nr_pages - 1;
 			continue;
 		}
-		__init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+		/*
+		 * Use a fake node/zone (0) for now. Some of these pages
+		 * (in memblock.reserved but not in memblock.memory) will
+		 * get re-initialized via reserve_bootmem_region() later.
+		 */
+		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
 		__SetPageReserved(pfn_to_page(pfn));
 		pgcnt++;
 	}
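A note on the pageblock arithmetic kept as context in the hunk above: when the first pfn of a pageblock is not pfn_valid(), the loop jumps past the whole pageblock instead of testing every pfn in it. Below is a minimal userspace sketch of that skip; ALIGN_DOWN is re-derived here and PAGEBLOCK_NR_PAGES is a toy stand-in for the kernel's pageblock_nr_pages, so treat the values as illustrative only.

#include <stdio.h>

/* Power-of-two round-down, mirroring the kernel's ALIGN_DOWN() semantics. */
#define ALIGN_DOWN(x, a)	((x) & ~((unsigned long)(a) - 1))
#define PAGEBLOCK_NR_PAGES	512UL	/* toy stand-in for pageblock_nr_pages */

int main(void)
{
	unsigned long pfn = 1000;	/* inside a pageblock whose first pfn is not valid */

	/* Same arithmetic as the loop body: move to the last pfn of this pageblock... */
	pfn = ALIGN_DOWN(pfn, PAGEBLOCK_NR_PAGES) + PAGEBLOCK_NR_PAGES - 1;

	/* ...so the for-loop's pfn++ resumes at the first pfn of the next pageblock. */
	printf("next pfn scanned: %lu\n", pfn + 1);	/* prints 1024 */
	return 0;
}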
@@ -7108,64 +7105,51 @@ static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn,
 }
 
 /*
- * Only struct pages that correspond to ranges defined by memblock.memory
- * are zeroed and initialized by going through __init_single_page() during
- * memmap_init().
- *
- * But, there could be struct pages that correspond to holes in
- * memblock.memory. This can happen because of the following reasons:
- * - phyiscal memory bank size is not necessarily the exact multiple of the
- *   arbitrary section size
- * - early reserved memory may not be listed in memblock.memory
- * - memory layouts defined with memmap= kernel parameter may not align
- *   nicely with memmap sections
- *
- * Explicitly initialize those struct pages so that:
- * - PG_Reserved is set
- * - zone link is set accorging to the architecture constrains
- * - node is set to node id of the next populated region except for the
- *   trailing hole where last node id is used
+ * Only struct pages that are backed by physical memory are zeroed and
+ * initialized by going through __init_single_page(). But, there are some
+ * struct pages which are reserved in memblock allocator and their fields
+ * may be accessed (for example page_to_pfn() on some configuration accesses
+ * flags). We must explicitly initialize those struct pages.
+ *
+ * This function also addresses a similar issue where struct pages are left
+ * uninitialized because the physical address range is not covered by
+ * memblock.memory or memblock.reserved. That could happen when memblock
+ * layout is manually configured via memmap=, or when the highest physical
+ * address (max_pfn) does not end on a section boundary.
  */
-static void __init init_zone_unavailable_mem(int zone)
+static void __init init_unavailable_mem(void)
 {
-	unsigned long start, end;
-	int i, nid;
-	u64 pgcnt;
-	unsigned long next = 0;
+	phys_addr_t start, end;
+	u64 i, pgcnt;
+	phys_addr_t next = 0;
 
 	/*
-	 * Loop through holes in memblock.memory and initialize struct
-	 * pages corresponding to these holes
+	 * Loop through unavailable ranges not covered by memblock.memory.
 	 */
 	pgcnt = 0;
-	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
+	for_each_mem_range(i, &start, &end) {
 		if (next < start)
-			pgcnt += init_unavailable_range(next, start, zone, nid);
+			pgcnt += init_unavailable_range(PFN_DOWN(next),
+							PFN_UP(start));
 		next = end;
 	}
 
 	/*
-	 * Last section may surpass the actual end of memory (e.g. we can
-	 * have 1Gb section and 512Mb of RAM pouplated).
-	 * Make sure that memmap has a well defined state in this case.
+	 * Early sections always have a fully populated memmap for the whole
+	 * section - see pfn_valid(). If the last section has holes at the
+	 * end and that section is marked "online", the memmap will be
+	 * considered initialized. Make sure that memmap has a well defined
+	 * state.
 	 */
-	end = round_up(max_pfn, PAGES_PER_SECTION);
-	pgcnt += init_unavailable_range(next, end, zone, nid);
+	pgcnt += init_unavailable_range(PFN_DOWN(next),
+					round_up(max_pfn, PAGES_PER_SECTION));
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
 	 */
 	if (pgcnt)
-		pr_info("Zone %s: zeroed struct page in unavailable ranges: %lld pages", zone_names[zone], pgcnt);
-}
-
-static void __init init_unavailable_mem(void)
-{
-	int zone;
-
-	for (zone = 0; zone < ZONE_MOVABLE; zone++)
-		init_zone_unavailable_mem(zone);
+		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
 }
 #else
 static inline void __init init_unavailable_mem(void)
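A side note on the restored loop above: for_each_mem_range() walks memblock.memory in physical addresses, so the hole [next, start) is handed to init_unavailable_range() as the pfn range PFN_DOWN(next)..PFN_UP(start). Below is a small sketch of that rounding; the macros are re-derived for an assumed 4 KiB page size and the addresses are made up, so this is only an illustration of the conversion, not code from the commit.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)			/* round a physical address down to a pfn */
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* round a physical address up to a pfn  */

int main(void)
{
	unsigned long long next  = 0x1000200ULL;	/* end of the previous memblock range    */
	unsigned long long start = 0x2000a00ULL;	/* start of the following memblock range */

	/*
	 * The hole between the two ranges becomes [spfn, epfn) for
	 * init_unavailable_range(): rounding outward means pages that
	 * straddle a range boundary also get their struct page set up.
	 */
	printf("spfn=%llu epfn=%llu\n", PFN_DOWN(next), PFN_UP(start));
	return 0;
}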
|