sparc64: memblock resizes are not handled properly
In add_node_ranges(), when a memblock resize happens, the iterator keeps using the previously freed array.

This bug causes hangs on machines where there are over 128 memory blocks during boot, for example on machines where memory interleaving is small.

The problem is seen on T4-4 because it can have 2T of memory, and memory is interleaved at 8G. So we have 2T/8G = 256 regions for which to set node IDs. The starting size of the regions array is 128, so we have to double it at least once (in fact twice, because some memory is already reserved and we therefore need more than 256 regions). After the first doubling, we start using a stale pointer into the freed array.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Babu Moger <babu.moger@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
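To make the failure mode concrete, here is a minimal userspace sketch of the same hazard and of the detect-and-restart pattern the patch adds. All names here (struct table, table_double, set_nid, assign_nids) are hypothetical stand-ins, not kernel APIs; set_nid() plays the role of memblock_set_node(), which can double the regions array as a side effect.

#include <stdlib.h>

struct region { unsigned long base, size; int nid; };

struct table {
	struct region *regions;   /* backing array; may move on resize */
	size_t cnt, max;          /* entries in use / capacity         */
};

/* Grow (and possibly move) the backing array, like
 * memblock_double_array(). Error handling elided for brevity. */
static void table_double(struct table *t)
{
	t->max *= 2;
	t->regions = realloc(t->regions, t->max * sizeof(*t->regions));
}

/* Idempotent per-entry operation standing in for memblock_set_node():
 * it may need room for a split, which resizes the table. */
static void set_nid(struct table *t, size_t i, int nid)
{
	if (t->cnt == t->max)
		table_double(t);   /* t->regions may move here */
	t->regions[i].nid = nid;
}

/* Mirrors the fixed add_node_ranges(): snapshot the capacity, and if
 * any call resized the table, restart rather than keep dereferencing
 * a pointer into the freed old array. Restarting is safe because the
 * per-entry operation is idempotent. */
static void assign_nids(struct table *t, int nid)
{
	struct region *reg;
	size_t prev_max;

restart:
	prev_max = t->max;
	for (reg = t->regions; reg < t->regions + t->cnt; reg++) {
		set_nid(t, (size_t)(reg - t->regions), nid);
		if (t->max != prev_max)
			goto restart;   /* 'reg' is now stale */
	}
}

int main(void)
{
	struct table t = { .cnt = 4, .max = 4 };

	t.regions = calloc(t.max, sizeof(*t.regions));
	assign_nids(&t, 1);   /* triggers exactly one resize + restart */
	free(t.regions);
	return 0;
}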
commit cd429ce2d0
parent 1537b26dab
@@ -1126,6 +1126,10 @@ int of_node_to_nid(struct device_node *dp)
 static void __init add_node_ranges(void)
 {
 	struct memblock_region *reg;
+	unsigned long prev_max;
+
+memblock_resized:
+	prev_max = memblock.memory.max;
 
 	for_each_memblock(memory, reg) {
 		unsigned long size = reg->size;
@@ -1145,6 +1149,8 @@ static void __init add_node_ranges(void)
 
 		memblock_set_node(start, this_end - start,
 				  &memblock.memory, nid);
+		if (memblock.memory.max != prev_max)
+			goto memblock_resized;
 		start = this_end;
 	}
 }
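For context, for_each_memblock() in kernels of this era expanded to approximately the following open-coded loop (paraphrased from include/linux/memblock.h; treat it as a sketch rather than the exact definition):

#define for_each_memblock(memblock_type, region)			\
	for (region = memblock.memblock_type.regions;			\
	     region < (memblock.memblock_type.regions +			\
		       memblock.memblock_type.cnt);			\
	     region++)

Because reg is a raw pointer into memblock.memory.regions, a resize inside memblock_set_node() (memblock_double_array() allocates a new array and frees the old one) leaves reg dangling. memblock.memory.max holds the array's capacity and changes only when the array is reallocated, so snapshotting it in prev_max and re-checking after each memblock_set_node() call is a cheap way to detect the move; restarting the walk is harmless because setting node IDs on already-processed ranges is idempotent.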