Merge branch 'memblock-kill-early_node_map' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/memblock
commit 45aa0663cc
@@ -52,6 +52,7 @@
 #include <asm/mach/time.h>
 #include <asm/traps.h>
 #include <asm/unwind.h>
+#include <asm/memblock.h>
 
 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 #include "compat.h"
@@ -32,6 +32,7 @@
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 
 #include "mm.h"
 
@@ -332,7 +333,6 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 
     sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 
-    memblock_init();
     for (i = 0; i < mi->nr_banks; i++)
         memblock_add(mi->bank[i].start, mi->bank[i].size);
 
@@ -371,7 +371,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
     if (mdesc->reserve)
         mdesc->reserve();
 
-    memblock_analyze();
+    memblock_allow_resize();
     memblock_dump_all();
 }
 
@@ -23,6 +23,9 @@ config IA64
     select HAVE_ARCH_TRACEHOOK
     select HAVE_DMA_API_DEBUG
     select HAVE_GENERIC_HARDIRQS
+    select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
+    select ARCH_DISCARD_MEMBLOCK
     select GENERIC_IRQ_PROBE
     select GENERIC_PENDING_IRQ if SMP
     select IRQ_PER_CPU
@@ -474,9 +477,6 @@ config NODES_SHIFT
       MAX_NUMNODES will be 2^(This value).
       If in doubt, use the default.
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
@@ -16,6 +16,7 @@
  */
 #include <linux/bootmem.h>
 #include <linux/efi.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/nmi.h>
 #include <linux/swap.h>
@@ -348,7 +349,7 @@ paging_init (void)
         printk("Virtual mem_map starts at 0x%p\n", mem_map);
     }
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
-    add_active_range(0, 0, max_low_pfn);
+    memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
     free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
     zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -557,8 +558,7 @@ int __init register_active_ranges(u64 start, u64 len, int nid)
 #endif
 
     if (start < end)
-        add_active_range(nid, __pa(start) >> PAGE_SHIFT,
-            __pa(end) >> PAGE_SHIFT);
+        memblock_add_node(__pa(start), end - start, nid);
     return 0;
 }
 
@@ -1,14 +0,0 @@
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_MEMBLOCK_H
-#define _ASM_MICROBLAZE_MEMBLOCK_H
-
-#endif /* _ASM_MICROBLAZE_MEMBLOCK_H */
-
-
@@ -122,7 +122,6 @@ void __init early_init_devtree(void *params)
     of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
 
     /* Scan memory nodes and rebuild MEMBLOCKs */
-    memblock_init();
     of_scan_flat_dt(early_init_dt_scan_root, NULL);
     of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
@@ -130,7 +129,7 @@ void __init early_init_devtree(void *params)
     strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
     parse_early_param();
 
-    memblock_analyze();
+    memblock_allow_resize();
 
     pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
 
@@ -25,6 +25,9 @@ config MIPS
     select GENERIC_IRQ_SHOW
     select HAVE_ARCH_JUMP_LABEL
     select IRQ_FORCED_THREADING
+    select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
+    select ARCH_DISCARD_MEMBLOCK
 
 menu "Machine selection"
 
@@ -2064,9 +2067,6 @@ config ARCH_DISCONTIGMEM_ENABLE
       or have huge holes in the physical address space for other reasons.
       See <file:Documentation/vm/numa> for more.
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 config ARCH_SPARSEMEM_ENABLE
     bool
     select SPARSEMEM_STATIC
@@ -14,6 +14,7 @@
 #include <linux/ioport.h>
 #include <linux/export.h>
 #include <linux/screen_info.h>
+#include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/root_dev.h>
@@ -352,7 +353,7 @@ static void __init bootmem_init(void)
             continue;
 #endif
 
-        add_active_range(0, start, end);
+        memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
     }
 
     /*
@@ -12,6 +12,7 @@
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -381,8 +382,8 @@ static void __init szmem(void)
                 continue;
             }
             num_physpages += slot_psize;
-            add_active_range(node, slot_getbasepfn(node, slot),
-                    slot_getbasepfn(node, slot) + slot_psize);
+            memblock_add_node(PFN_PHYS(slot_getbasepfn(node, slot)),
+                    PFN_PHYS(slot_psize), node);
         }
     }
 }
@@ -1,24 +0,0 @@
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_OPENRISC_MEMBLOCK_H
-#define __ASM_OPENRISC_MEMBLOCK_H
-
-/* empty */
-
-#endif /* __ASM_OPENRISC_MEMBLOCK_H */
@@ -76,14 +76,13 @@ void __init early_init_devtree(void *params)
     of_scan_flat_dt(early_init_dt_scan_chosen, cmd_line);
 
     /* Scan memory nodes and rebuild MEMBLOCKs */
-    memblock_init();
     of_scan_flat_dt(early_init_dt_scan_root, NULL);
     of_scan_flat_dt(early_init_dt_scan_memory, NULL);
 
     /* Save command line for /proc/cmdline and then parse parameters */
     strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
 
-    memblock_analyze();
+    memblock_allow_resize();
 
     /* We must copy the flattend device tree from init memory to regular
      * memory because the device tree references the strings in it
@@ -117,6 +117,7 @@ config PPC
     select HAVE_KRETPROBES
     select HAVE_ARCH_TRACEHOOK
     select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
     select HAVE_DMA_ATTRS
     select HAVE_DMA_API_DEBUG
     select USE_GENERIC_SMP_HELPERS if SMP
@@ -421,9 +422,6 @@ config ARCH_SPARSEMEM_DEFAULT
     def_bool y
     depends on (SMP && PPC_PSERIES) || PPC_PS3
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 config SYS_SUPPORTS_HUGETLBFS
     bool
 
@@ -1,8 +0,0 @@
-#ifndef _ASM_POWERPC_MEMBLOCK_H
-#define _ASM_POWERPC_MEMBLOCK_H
-
-#include <asm/udbg.h>
-
-#define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
-
-#endif /* _ASM_POWERPC_MEMBLOCK_H */
@@ -107,9 +107,6 @@ void __init reserve_crashkernel(void)
     unsigned long long crash_size, crash_base;
     int ret;
 
-    /* this is necessary because of memblock_phys_mem_size() */
-    memblock_analyze();
-
     /* use common parsing */
     ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
             &crash_size, &crash_base);
@@ -733,8 +733,6 @@ void __init early_init_devtree(void *params)
     of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line);
 
     /* Scan memory nodes and rebuild MEMBLOCKs */
-    memblock_init();
-
     of_scan_flat_dt(early_init_dt_scan_root, NULL);
     of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
@@ -756,20 +754,14 @@ void __init early_init_devtree(void *params)
     early_reserve_mem();
     phyp_dump_reserve_mem();
 
-    limit = memory_limit;
-    if (! limit) {
-        phys_addr_t memsize;
-
-        /* Ensure that total memory size is page-aligned, because
-         * otherwise mark_bootmem() gets upset. */
-        memblock_analyze();
-        memsize = memblock_phys_mem_size();
-        if ((memsize & PAGE_MASK) != memsize)
-            limit = memsize & PAGE_MASK;
-    }
+    /*
+     * Ensure that total memory size is page-aligned, because otherwise
+     * mark_bootmem() gets upset.
+     */
+    limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
     memblock_enforce_memory_limit(limit);
 
-    memblock_analyze();
+    memblock_allow_resize();
     memblock_dump_all();
 
     DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
@@ -134,8 +134,7 @@ void __init MMU_init(void)
 
     if (memblock.memory.cnt > 1) {
 #ifndef CONFIG_WII
-        memblock.memory.cnt = 1;
-        memblock_analyze();
+        memblock_enforce_memory_limit(memblock.memory.regions[0].size);
         printk(KERN_WARNING "Only using first contiguous memory region");
 #else
         wii_memory_fixups();
@@ -158,7 +157,6 @@ void __init MMU_init(void)
 #ifndef CONFIG_HIGHMEM
     total_memory = total_lowmem;
     memblock_enforce_memory_limit(total_lowmem);
-    memblock_analyze();
 #endif /* CONFIG_HIGHMEM */
 }
 
@@ -199,7 +199,7 @@ void __init do_init_bootmem(void)
         unsigned long start_pfn, end_pfn;
         start_pfn = memblock_region_memory_base_pfn(reg);
         end_pfn = memblock_region_memory_end_pfn(reg);
-        add_active_range(0, start_pfn, end_pfn);
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
     }
 
     /* Add all physical memory to the bootmem map, mark each area
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
 }
 
 /*
- * get_active_region_work_fn - A helper function for get_node_active_region
- *    Returns datax set to the start_pfn and end_pfn if they contain
- *    the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- *    goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
-                    unsigned long end_pfn, void *datax)
-{
-    struct node_active_region *data;
-    data = (struct node_active_region *)datax;
-
-    if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
-        data->start_pfn = start_pfn;
-        data->end_pfn = end_pfn;
-        return 1;
-    }
-    return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
  * Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
  */
-static void __init get_node_active_region(unsigned long start_pfn,
+static void __init get_node_active_region(unsigned long pfn,
                     struct node_active_region *node_ar)
 {
-    int nid = early_pfn_to_nid(start_pfn);
+    unsigned long start_pfn, end_pfn;
+    int i, nid;
 
-    node_ar->nid = nid;
-    node_ar->start_pfn = start_pfn;
-    node_ar->end_pfn = start_pfn;
-    work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+    for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+        if (pfn >= start_pfn && pfn < end_pfn) {
+            node_ar->nid = nid;
+            node_ar->start_pfn = start_pfn;
+            node_ar->end_pfn = end_pfn;
+            break;
+        }
+    }
 }
 
 static void map_cpu_to_node(int cpu, int node)
@@ -710,9 +690,7 @@ static void __init parse_drconf_memory(struct device_node *memory)
             node_set_online(nid);
         sz = numa_enforce_memory_limit(base, size);
         if (sz)
-            add_active_range(nid, base >> PAGE_SHIFT,
-                    (base >> PAGE_SHIFT)
-                    + (sz >> PAGE_SHIFT));
+            memblock_set_node(base, sz, nid);
     } while (--ranges);
 }
 }
@@ -802,8 +780,7 @@ new_range:
             continue;
         }
 
-        add_active_range(nid, start >> PAGE_SHIFT,
-                (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+        memblock_set_node(start, size, nid);
 
         if (--ranges)
             goto new_range;
@@ -839,7 +816,8 @@ static void __init setup_nonnuma(void)
         end_pfn = memblock_region_memory_end_pfn(reg);
 
         fake_numa_create_new_node(end_pfn, &nid);
-        add_active_range(nid, start_pfn, end_pfn);
+        memblock_set_node(PFN_PHYS(start_pfn),
+                PFN_PHYS(end_pfn - start_pfn), nid);
         node_set_online(nid);
     }
 }
@@ -615,7 +615,6 @@ static void __early_init_mmu(int boot_cpu)
 
     /* limit memory so we dont have linear faults */
     memblock_enforce_memory_limit(linear_map_top);
-    memblock_analyze();
 
     patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
     patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
@@ -79,24 +79,19 @@ void __init wii_memory_fixups(void)
     BUG_ON(memblock.memory.cnt != 2);
     BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
 
-    p[0].size = _ALIGN_DOWN(p[0].size, PAGE_SIZE);
-    p[1].size = _ALIGN_DOWN(p[1].size, PAGE_SIZE);
+    /* trim unaligned tail */
+    memblock_remove(ALIGN(p[1].base + p[1].size, PAGE_SIZE),
+            (phys_addr_t)ULLONG_MAX);
 
-    wii_hole_start = p[0].base + p[0].size;
+    /* determine hole, add & reserve them */
+    wii_hole_start = ALIGN(p[0].base + p[0].size, PAGE_SIZE);
     wii_hole_size = p[1].base - wii_hole_start;
-
-    pr_info("MEM1: <%08llx %08llx>\n", p[0].base, p[0].size);
-    pr_info("HOLE: <%08lx %08lx>\n", wii_hole_start, wii_hole_size);
-    pr_info("MEM2: <%08llx %08llx>\n", p[1].base, p[1].size);
-
-    p[0].size += wii_hole_size + p[1].size;
-
-    memblock.memory.cnt = 1;
-    memblock_analyze();
-
-    /* reserve the hole */
+    memblock_add(wii_hole_start, wii_hole_size);
     memblock_reserve(wii_hole_start, wii_hole_size);
 
+    BUG_ON(memblock.memory.cnt != 1);
+    __memblock_dump_all();
+
     /* allow ioremapping the address space in the hole */
     __allow_ioremap_reserved = 1;
 }
@@ -319,7 +319,6 @@ static int __init ps3_mm_add_memory(void)
     }
 
     memblock_add(start_addr, map.r1.size);
-    memblock_analyze();
 
     result = online_pages(start_pfn, nr_pages);
 
@@ -92,6 +92,9 @@ config S390
     select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
     select HAVE_RCU_TABLE_FREE if SMP
     select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+    select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
+    select ARCH_DISCARD_MEMBLOCK
     select ARCH_INLINE_SPIN_TRYLOCK
     select ARCH_INLINE_SPIN_TRYLOCK_BH
     select ARCH_INLINE_SPIN_LOCK
@@ -345,9 +348,6 @@ config WARN_DYNAMIC_STACK
 
       Say N if you are unsure.
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/unistd.h>
@@ -820,7 +821,8 @@ setup_memory(void)
         end_chunk = min(end_chunk, end_pfn);
         if (start_chunk >= end_chunk)
             continue;
-        add_active_range(0, start_chunk, end_chunk);
+        memblock_add_node(PFN_PHYS(start_chunk),
+                PFN_PHYS(end_chunk - start_chunk), 0);
         pfn = max(start_chunk, start_pfn);
         for (; pfn < end_chunk; pfn++)
             page_set_storage_key(PFN_PHYS(pfn),
@@ -4,6 +4,9 @@ config SCORE
     def_bool y
     select HAVE_GENERIC_HARDIRQS
     select GENERIC_IRQ_SHOW
+    select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
+    select ARCH_DISCARD_MEMBLOCK
 
 choice
     prompt "System type"
@@ -60,9 +63,6 @@ config 32BIT
 config ARCH_FLATMEM_ENABLE
     def_bool y
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 source "mm/Kconfig"
 
 config MEMORY_START
@@ -26,6 +26,7 @@
 #include <linux/bootmem.h>
 #include <linux/initrd.h>
 #include <linux/ioport.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/screen_info.h>
@@ -54,7 +55,8 @@ static void __init bootmem_init(void)
     /* Initialize the boot-time allocator with low memory only. */
     bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
                     min_low_pfn, max_low_pfn);
-    add_active_range(0, min_low_pfn, max_low_pfn);
+    memblock_add_node(PFN_PHYS(min_low_pfn),
+            PFN_PHYS(max_low_pfn - min_low_pfn), 0);
 
     free_bootmem(PFN_PHYS(start_pfn),
             (max_low_pfn - start_pfn) << PAGE_SHIFT);
@@ -4,6 +4,7 @@ config SUPERH
     select CLKDEV_LOOKUP
     select HAVE_IDE if HAS_IOPORT
     select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
     select HAVE_OPROFILE
     select HAVE_GENERIC_DMA_COHERENT
     select HAVE_ARCH_TRACEHOOK
@@ -1,4 +0,0 @@
-#ifndef __ASM_SH_MEMBLOCK_H
-#define __ASM_SH_MEMBLOCK_H
-
-#endif /* __ASM_SH_MEMBLOCK_H */
@@ -157,9 +157,6 @@ void __init reserve_crashkernel(void)
     unsigned long long crash_size, crash_base;
     int ret;
 
-    /* this is necessary because of memblock_phys_mem_size() */
-    memblock_analyze();
-
     ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
             &crash_size, &crash_base);
     if (ret == 0 && crash_size > 0) {
@@ -230,7 +230,8 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
     pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
             PAGE_KERNEL);
 
-    add_active_range(nid, start_pfn, end_pfn);
+    memblock_set_node(PFN_PHYS(start_pfn),
+            PFN_PHYS(end_pfn - start_pfn), nid);
 }
 
 void __init __weak plat_early_device_setup(void)
@@ -143,9 +143,6 @@ config MAX_ACTIVE_REGIONS
               CPU_SUBTYPE_SH7785)
     default "1"
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 config ARCH_SELECT_MEMORY_MODEL
     def_bool y
 
@@ -324,7 +324,6 @@ void __init paging_init(void)
     unsigned long vaddr, end;
     int nid;
 
-    memblock_init();
     sh_mv.mv_mem_init();
 
     early_reserve_mem();
@@ -337,7 +336,7 @@ void __init paging_init(void)
         sh_mv.mv_mem_reserve();
 
     memblock_enforce_memory_limit(memory_limit);
-    memblock_analyze();
+    memblock_allow_resize();
 
     memblock_dump_all();
 
@@ -43,6 +43,7 @@ config SPARC64
     select HAVE_KPROBES
     select HAVE_RCU_TABLE_FREE if SMP
     select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
     select HAVE_SYSCALL_WRAPPERS
     select HAVE_DYNAMIC_FTRACE
     select HAVE_FTRACE_MCOUNT_RECORD
@@ -352,9 +353,6 @@ config NODES_SPAN_OTHER_NODES
     def_bool y
     depends on NEED_MULTIPLE_NODES
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y if SPARC64
-
 config ARCH_SELECT_MEMORY_MODEL
     def_bool y if SPARC64
 
@@ -1,8 +0,0 @@
-#ifndef _SPARC64_MEMBLOCK_H
-#define _SPARC64_MEMBLOCK_H
-
-#include <asm/oplib.h>
-
-#define MEMBLOCK_DBG(fmt...) prom_printf(fmt)
-
-#endif /* !(_SPARC64_MEMBLOCK_H) */
@@ -790,7 +790,7 @@ static int find_node(unsigned long addr)
     return -1;
 }
 
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
     *nid = find_node(start);
     start += PAGE_SIZE;
@@ -808,7 +808,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
     return start;
 }
 #else
-u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 memblock_nid_range(u64 start, u64 end, int *nid)
 {
     *nid = 0;
     return end;
@@ -816,7 +816,7 @@ u64 memblock_nid_range(u64 start, u64 end, int *nid)
 #endif
 
 /* This must be invoked after performing all of the necessary
- * add_active_range() calls for 'nid'. We need to be able to get
+ * memblock_set_node() calls for 'nid'. We need to be able to get
 * correct data from get_pfn_range_for_nid().
 */
 static void __init allocate_node_data(int nid)
@@ -987,14 +987,11 @@ static void __init add_node_ranges(void)
 
         this_end = memblock_nid_range(start, end, &nid);
 
-        numadbg("Adding active range nid[%d] "
+        numadbg("Setting memblock NUMA node nid[%d] "
             "start[%lx] end[%lx]\n",
             nid, start, this_end);
 
-        add_active_range(nid,
-                start >> PAGE_SHIFT,
-                this_end >> PAGE_SHIFT);
+        memblock_set_node(start, this_end - start, nid);
 
         start = this_end;
     }
 }
@@ -1282,7 +1279,6 @@ static void __init bootmem_init_nonnuma(void)
 {
     unsigned long top_of_ram = memblock_end_of_DRAM();
     unsigned long total_ram = memblock_phys_mem_size();
-    struct memblock_region *reg;
 
     numadbg("bootmem_init_nonnuma()\n");
 
@@ -1292,20 +1288,8 @@ static void __init bootmem_init_nonnuma(void)
         (top_of_ram - total_ram) >> 20);
 
     init_node_masks_nonnuma();
-
-    for_each_memblock(memory, reg) {
-        unsigned long start_pfn, end_pfn;
-
-        if (!reg->size)
-            continue;
-
-        start_pfn = memblock_region_memory_base_pfn(reg);
-        end_pfn = memblock_region_memory_end_pfn(reg);
-        add_active_range(0, start_pfn, end_pfn);
-    }
-
+    memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
     allocate_node_data(0);
 
     node_set_online(0);
 }
 
@@ -1769,8 +1753,6 @@ void __init paging_init(void)
         sun4v_ktsb_init();
     }
 
-    memblock_init();
-
     /* Find available physical memory...
      *
      * Read it twice in order to work around a bug in openfirmware.
@@ -1796,7 +1778,7 @@ void __init paging_init(void)
 
     memblock_enforce_memory_limit(cmdline_memory_size);
 
-    memblock_analyze();
+    memblock_allow_resize();
     memblock_dump_all();
 
     set_bit(0, mmu_context_bmap);
@@ -37,6 +37,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/traps.h>
+#include <asm/memblock.h>
 
 #include "setup.h"
 
@@ -26,6 +26,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 #include <mach/map.h>
 
 #include "mm.h"
@@ -245,7 +246,6 @@ void __init uc32_memblock_init(struct meminfo *mi)
     sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]),
         meminfo_cmp, NULL);
 
-    memblock_init();
     for (i = 0; i < mi->nr_banks; i++)
         memblock_add(mi->bank[i].start, mi->bank[i].size);
 
@@ -264,7 +264,7 @@ void __init uc32_memblock_init(struct meminfo *mi)
 
     uc32_mm_memblock_reserve();
 
-    memblock_analyze();
+    memblock_allow_resize();
     memblock_dump_all();
 }
 
@@ -25,6 +25,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 
 #include <mach/map.h>
 
@@ -26,6 +26,8 @@ config X86
     select HAVE_IOREMAP_PROT
     select HAVE_KPROBES
     select HAVE_MEMBLOCK
+    select HAVE_MEMBLOCK_NODE_MAP
+    select ARCH_DISCARD_MEMBLOCK
     select ARCH_WANT_OPTIONAL_GPIOLIB
     select ARCH_WANT_FRAME_POINTERS
     select HAVE_DMA_ATTRS
@@ -204,9 +206,6 @@ config ZONE_DMA32
     bool
     default X86_64
 
-config ARCH_POPULATES_NODE_MAP
-    def_bool y
-
 config AUDIT_ARCH
     bool
     default X86_64
@@ -117,7 +117,7 @@ static inline void early_memtest(unsigned long start, unsigned long end)
 
 extern unsigned long e820_end_of_ram_pfn(void);
 extern unsigned long e820_end_of_low_ram_pfn(void);
-extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);
+extern u64 early_reserve_e820(u64 sizet, u64 align);
 
 void memblock_x86_fill(void);
 void memblock_find_dma_reserve(void);
@@ -1,23 +0,0 @@
-#ifndef _X86_MEMBLOCK_H
-#define _X86_MEMBLOCK_H
-
-#define ARCH_DISCARD_MEMBLOCK
-
-u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
-
-void memblock_x86_reserve_range(u64 start, u64 end, char *name);
-void memblock_x86_free_range(u64 start, u64 end);
-struct range;
-int __get_free_all_memory_range(struct range **range, int nodeid,
-            unsigned long start_pfn, unsigned long end_pfn);
-int get_free_all_memory_range(struct range **rangep, int nodeid);
-
-void memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
-            unsigned long last_pfn);
-u64 memblock_x86_hole_size(u64 start, u64 end);
-u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
-u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
-u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
-bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
-
-#endif
@@ -88,13 +88,13 @@ static u32 __init allocate_aperture(void)
      */
     addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
                     aper_size, aper_size);
-    if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
+    if (!addr || addr + aper_size > GART_MAX_ADDR) {
         printk(KERN_ERR
             "Cannot allocate aperture memory hole (%lx,%uK)\n",
             addr, aper_size>>10);
         return 0;
     }
-    memblock_x86_reserve_range(addr, addr + aper_size, "aperture64");
+    memblock_reserve(addr, aper_size);
     /*
      * Kmemleak should not scan this block as it may not be mapped via the
      * kernel direct mapping.
@@ -62,7 +62,8 @@ early_param("memory_corruption_check_size", set_corruption_check_size);
 
 void __init setup_bios_corruption_check(void)
 {
-    u64 addr = PAGE_SIZE; /* assume first page is reserved anyway */
+    phys_addr_t start, end;
+    u64 i;
 
     if (memory_corruption_check == -1) {
         memory_corruption_check =
@@ -82,28 +83,23 @@ void __init setup_bios_corruption_check(void)
 
     corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
 
-    while (addr < corruption_check_size && num_scan_areas < MAX_SCAN_AREAS) {
-        u64 size;
-        addr = memblock_x86_find_in_range_size(addr, &size, PAGE_SIZE);
-
-        if (addr == MEMBLOCK_ERROR)
-            break;
-
-        if (addr >= corruption_check_size)
-            break;
-
-        if ((addr + size) > corruption_check_size)
-            size = corruption_check_size - addr;
-
-        memblock_x86_reserve_range(addr, addr + size, "SCAN RAM");
-        scan_areas[num_scan_areas].addr = addr;
-        scan_areas[num_scan_areas].size = size;
-        num_scan_areas++;
+    for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+        start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
+                PAGE_SIZE, corruption_check_size);
+        end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
+                PAGE_SIZE, corruption_check_size);
+        if (start >= end)
+            continue;
+
+        memblock_reserve(start, end - start);
+        scan_areas[num_scan_areas].addr = start;
+        scan_areas[num_scan_areas].size = end - start;
 
         /* Assume we've already mapped this early memory */
-        memset(__va(addr), 0, size);
+        memset(__va(start), 0, end - start);
 
-        addr += size;
+        if (++num_scan_areas >= MAX_SCAN_AREAS)
+            break;
     }
 
     if (num_scan_areas)
@@ -738,35 +738,17 @@ core_initcall(e820_mark_nvs_memory);
 /*
  * pre allocated 4k and reserved it in memblock and e820_saved
  */
-u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
+u64 __init early_reserve_e820(u64 size, u64 align)
 {
-    u64 size = 0;
     u64 addr;
-    u64 start;
 
-    for (start = startt; ; start += size) {
-        start = memblock_x86_find_in_range_size(start, &size, align);
-        if (start == MEMBLOCK_ERROR)
-            return 0;
-        if (size >= sizet)
-            break;
+    addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
+    if (addr) {
+        e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
+        printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
+        update_e820_saved();
     }
 
-#ifdef CONFIG_X86_32
-    if (start >= MAXMEM)
-        return 0;
-    if (start + size > MAXMEM)
-        size = MAXMEM - start;
-#endif
-
-    addr = round_down(start + size - sizet, align);
-    if (addr < start)
-        return 0;
-    memblock_x86_reserve_range(addr, addr + sizet, "new next");
-    e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
-    printk(KERN_INFO "update e820_saved for early_reserve_e820\n");
-    update_e820_saved();
-
     return addr;
 }
 
@@ -1090,7 +1072,7 @@ void __init memblock_x86_fill(void)
     * We are safe to enable resizing, beause memblock_x86_fill()
     * is rather later for x86
     */
-    memblock_can_resize = 1;
+    memblock_allow_resize();
 
     for (i = 0; i < e820.nr_map; i++) {
         struct e820entry *ei = &e820.map[i];
@@ -1105,22 +1087,36 @@ void __init memblock_x86_fill(void)
         memblock_add(ei->addr, ei->size);
     }
 
-    memblock_analyze();
     memblock_dump_all();
 }
 
 void __init memblock_find_dma_reserve(void)
 {
 #ifdef CONFIG_X86_64
-    u64 free_size_pfn;
-    u64 mem_size_pfn;
+    u64 nr_pages = 0, nr_free_pages = 0;
+    unsigned long start_pfn, end_pfn;
+    phys_addr_t start, end;
+    int i;
+    u64 u;
 
     /*
     * need to find out used area below MAX_DMA_PFN
     * need to use memblock to get free size in [0, MAX_DMA_PFN]
     * at first, and assume boot_mem will not take below MAX_DMA_PFN
     */
-    mem_size_pfn = memblock_x86_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-    free_size_pfn = memblock_x86_free_memory_in_range(0, MAX_DMA_PFN << PAGE_SHIFT) >> PAGE_SHIFT;
-    set_dma_reserve(mem_size_pfn - free_size_pfn);
+    for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
+        start_pfn = min_t(unsigned long, start_pfn, MAX_DMA_PFN);
+        end_pfn = min_t(unsigned long, end_pfn, MAX_DMA_PFN);
+        nr_pages += end_pfn - start_pfn;
+    }
+
+    for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+        start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
+        end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
+        if (start_pfn < end_pfn)
+            nr_free_pages += end_pfn - start_pfn;
+    }
+
+    set_dma_reserve(nr_pages - nr_free_pages);
 #endif
 }
@@ -52,5 +52,5 @@ void __init reserve_ebda_region(void)
         lowmem = 0x9f000;
 
     /* reserve all memory between lowmem and the 1MB mark */
-    memblock_x86_reserve_range(lowmem, 0x100000, "* BIOS reserved");
+    memblock_reserve(lowmem, 0x100000 - lowmem);
 }
@@ -31,9 +31,8 @@ static void __init i386_default_early_setup(void)
 
 void __init i386_start_kernel(void)
 {
-    memblock_init();
-
-    memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+    memblock_reserve(__pa_symbol(&_text),
+            __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
     /* Reserve INITRD */
@@ -42,7 +41,7 @@ void __init i386_start_kernel(void)
         u64 ramdisk_image = boot_params.hdr.ramdisk_image;
         u64 ramdisk_size = boot_params.hdr.ramdisk_size;
         u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-        memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+        memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
     }
 #endif
 
@@ -98,9 +98,8 @@ void __init x86_64_start_reservations(char *real_mode_data)
 {
     copy_bootdata(__va(real_mode_data));
 
-    memblock_init();
-
-    memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
+    memblock_reserve(__pa_symbol(&_text),
+            __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
 
 #ifdef CONFIG_BLK_DEV_INITRD
     /* Reserve INITRD */
@@ -109,7 +108,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
         unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
         unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
         unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
-        memblock_x86_reserve_range(ramdisk_image, ramdisk_end, "RAMDISK");
+        memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
     }
 #endif
 
@@ -564,9 +564,7 @@ void __init default_get_smp_config(unsigned int early)
 
 static void __init smp_reserve_memory(struct mpf_intel *mpf)
 {
-    unsigned long size = get_mpc_size(mpf->physptr);
-
-    memblock_x86_reserve_range(mpf->physptr, mpf->physptr+size, "* MP-table mpc");
+    memblock_reserve(mpf->physptr, get_mpc_size(mpf->physptr));
 }
 
 static int __init smp_scan_config(unsigned long base, unsigned long length)
@@ -595,7 +593,7 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
                 mpf, (u64)virt_to_phys(mpf));
 
             mem = virt_to_phys(mpf);
-            memblock_x86_reserve_range(mem, mem + sizeof(*mpf), "* MP-table mpf");
+            memblock_reserve(mem, sizeof(*mpf));
             if (mpf->physptr)
                 smp_reserve_memory(mpf);
 
@@ -836,10 +834,8 @@ early_param("alloc_mptable", parse_alloc_mptable_opt);
 
 void __init early_reserve_e820_mpc_new(void)
 {
-    if (enable_update_mptable && alloc_mptable) {
-        u64 startt = 0;
-        mpc_new_phys = early_reserve_e820(startt, mpc_new_length, 4);
-    }
+    if (enable_update_mptable && alloc_mptable)
+        mpc_new_phys = early_reserve_e820(mpc_new_length, 4);
 }
 
 static int __init update_mp_table(void)
@@ -306,7 +306,8 @@ static void __init cleanup_highmap(void)
 static void __init reserve_brk(void)
 {
     if (_brk_end > _brk_start)
-        memblock_x86_reserve_range(__pa(_brk_start), __pa(_brk_end), "BRK");
+        memblock_reserve(__pa(_brk_start),
+                __pa(_brk_end) - __pa(_brk_start));
 
     /* Mark brk area as locked down and no longer taking any
        new allocations */
@@ -331,13 +332,13 @@ static void __init relocate_initrd(void)
     ramdisk_here = memblock_find_in_range(0, end_of_lowmem, area_size,
                     PAGE_SIZE);
 
-    if (ramdisk_here == MEMBLOCK_ERROR)
+    if (!ramdisk_here)
         panic("Cannot find place for new RAMDISK of size %lld\n",
                 ramdisk_size);
 
     /* Note: this includes all the lowmem currently occupied by
        the initrd, we rely on that fact to keep the data intact. */
-    memblock_x86_reserve_range(ramdisk_here, ramdisk_here + area_size, "NEW RAMDISK");
+    memblock_reserve(ramdisk_here, area_size);
     initrd_start = ramdisk_here + PAGE_OFFSET;
     initrd_end = initrd_start + ramdisk_size;
     printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n",
@@ -393,7 +394,7 @@ static void __init reserve_initrd(void)
     initrd_start = 0;
 
     if (ramdisk_size >= (end_of_lowmem>>1)) {
-        memblock_x86_free_range(ramdisk_image, ramdisk_end);
+        memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
         printk(KERN_ERR "initrd too large to handle, "
                "disabling initrd\n");
         return;
@@ -416,7 +417,7 @@ static void __init reserve_initrd(void)
 
     relocate_initrd();
 
-    memblock_x86_free_range(ramdisk_image, ramdisk_end);
+    memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
 }
 #else
 static void __init reserve_initrd(void)
@@ -490,15 +491,13 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 {
     struct setup_data *data;
     u64 pa_data;
-    char buf[32];
 
     if (boot_params.hdr.version < 0x0209)
         return;
     pa_data = boot_params.hdr.setup_data;
     while (pa_data) {
         data = early_memremap(pa_data, sizeof(*data));
-        sprintf(buf, "setup data %x", data->type);
-        memblock_x86_reserve_range(pa_data, pa_data+sizeof(*data)+data->len, buf);
+        memblock_reserve(pa_data, sizeof(*data) + data->len);
         pa_data = data->next;
         early_iounmap(data, sizeof(*data));
     }
@@ -554,7 +553,7 @@ static void __init reserve_crashkernel(void)
         crash_base = memblock_find_in_range(alignment,
                 CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
 
-        if (crash_base == MEMBLOCK_ERROR) {
+        if (!crash_base) {
             pr_info("crashkernel reservation failed - No suitable area found.\n");
             return;
         }
@@ -568,7 +567,7 @@ static void __init reserve_crashkernel(void)
             return;
         }
     }
-    memblock_x86_reserve_range(crash_base, crash_base + crash_size, "CRASH KERNEL");
+    memblock_reserve(crash_base, crash_size);
 
     printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
            "for crashkernel (System RAM: %ldMB)\n",
@@ -626,7 +625,7 @@ static __init void reserve_ibft_region(void)
     addr = find_ibft_region(&size);
 
     if (size)
-        memblock_x86_reserve_range(addr, addr + size, "* ibft");
+        memblock_reserve(addr, size);
 }
 
 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
@@ -14,11 +14,11 @@ void __init setup_trampolines(void)
 
     /* Has to be in very low memory so we can execute real-mode AP code. */
     mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
-    if (mem == MEMBLOCK_ERROR)
+    if (!mem)
         panic("Cannot allocate trampoline\n");
 
     x86_trampoline_base = __va(mem);
-    memblock_x86_reserve_range(mem, mem + size, "TRAMPOLINE");
+    memblock_reserve(mem, size);
 
     printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
            x86_trampoline_base, (unsigned long long)mem, size);
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA)	+= amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)	+= srat.o
 obj-$(CONFIG_NUMA_EMU)	+= numa_emulation.o
 
-obj-$(CONFIG_HAVE_MEMBLOCK)	+= memblock.o
-
 obj-$(CONFIG_MEMTEST)	+= memtest.o
@@ -67,7 +67,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 	good_end = max_pfn_mapped << PAGE_SHIFT;
 
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
-	if (base == MEMBLOCK_ERROR)
+	if (!base)
 		panic("Cannot find space for the kernel page tables");
 
 	pgt_buf_start = base >> PAGE_SHIFT;
@@ -80,7 +80,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-	memblock_x86_reserve_range(start, end, "PGTABLE");
+	memblock_reserve(start, end - start);
 }
 
 struct map_range {
@@ -279,8 +279,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
 	 * so that they can be reused for other purposes.
 	 *
-	 * On native it just means calling memblock_x86_reserve_range, on Xen it
-	 * also means marking RW the pagetable pages that we allocated before
+	 * On native it just means calling memblock_reserve, on Xen it also
+	 * means marking RW the pagetable pages that we allocated before
 	 * but that haven't been used.
 	 *
 	 * In fact on xen we mark RO the whole range pgt_buf_start -
@@ -427,23 +427,17 @@ static void __init add_one_highpage_init(struct page *page)
 void __init add_highpages_with_active_regions(int nid,
 			 unsigned long start_pfn, unsigned long end_pfn)
 {
-	struct range *range;
-	int nr_range;
-	int i;
+	phys_addr_t start, end;
+	u64 i;
 
-	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);
-
-	for (i = 0; i < nr_range; i++) {
-		struct page *page;
-		int node_pfn;
-
-		for (node_pfn = range[i].start; node_pfn < range[i].end;
-		     node_pfn++) {
-			if (!pfn_valid(node_pfn))
-				continue;
-			page = pfn_to_page(node_pfn);
-			add_one_highpage_init(page);
-		}
+	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
+					    start_pfn, end_pfn);
+		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
+					      start_pfn, end_pfn);
+		for ( ; pfn < e_pfn; pfn++)
+			if (pfn_valid(pfn))
+				add_one_highpage_init(pfn_to_page(pfn));
 	}
 }
 #else
@@ -650,18 +644,18 @@ void __init initmem_init(void)
 	highstart_pfn = highend_pfn = max_pfn;
 	if (max_pfn > max_low_pfn)
 		highstart_pfn = max_low_pfn;
-	memblock_x86_register_active_regions(0, 0, highend_pfn);
-	sparse_memory_present_with_active_regions(0);
 	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
 		pages_to_mb(highend_pfn - highstart_pfn));
 	num_physpages = highend_pfn;
 	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
 #else
-	memblock_x86_register_active_regions(0, 0, max_low_pfn);
-	sparse_memory_present_with_active_regions(0);
 	num_physpages = max_low_pfn;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+	sparse_memory_present_with_active_regions(0);
+
 #ifdef CONFIG_FLATMEM
 	max_mapnr = num_physpages;
 #endif
@@ -608,7 +608,7 @@ kernel_physical_mapping_init(unsigned long start,
 #ifndef CONFIG_NUMA
 void __init initmem_init(void)
 {
-	memblock_x86_register_active_regions(0, 0, max_pfn);
+	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
 }
 #endif
 
@@ -1,348 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-/* Check for already reserved areas */
-bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
-{
-	struct memblock_region *r;
-	u64 addr = *addrp, last;
-	u64 size = *sizep;
-	bool changed = false;
-
-again:
-	last = addr + size;
-	for_each_memblock(reserved, r) {
-		if (last > r->base && addr < r->base) {
-			size = r->base - addr;
-			changed = true;
-			goto again;
-		}
-		if (last > (r->base + r->size) && addr < (r->base + r->size)) {
-			addr = round_up(r->base + r->size, align);
-			size = last - addr;
-			changed = true;
-			goto again;
-		}
-		if (last <= (r->base + r->size) && addr >= r->base) {
-			*sizep = 0;
-			return false;
-		}
-	}
-	if (changed) {
-		*addrp = addr;
-		*sizep = size;
-	}
-	return changed;
-}
-
-/*
- * Find next free range after start, and size is returned in *sizep
- */
-u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
-{
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r) {
-		u64 ei_start = r->base;
-		u64 ei_last = ei_start + r->size;
-		u64 addr;
-
-		addr = round_up(ei_start, align);
-		if (addr < start)
-			addr = round_up(start, align);
-		if (addr >= ei_last)
-			continue;
-		*sizep = ei_last - addr;
-		while (memblock_x86_check_reserved_size(&addr, sizep, align))
-			;
-
-		if (*sizep)
-			return addr;
-	}
-
-	return MEMBLOCK_ERROR;
-}
-
-static __init struct range *find_range_array(int count)
-{
-	u64 end, size, mem;
-	struct range *range;
-
-	size = sizeof(struct range) * count;
-	end = memblock.current_limit;
-
-	mem = memblock_find_in_range(0, end, size, sizeof(struct range));
-	if (mem == MEMBLOCK_ERROR)
-		panic("can not find more space for range array");
-
-	/*
-	 * This range is tempoaray, so don't reserve it, it will not be
-	 * overlapped because We will not alloccate new buffer before
-	 * We discard this one
-	 */
-	range = __va(mem);
-	memset(range, 0, size);
-
-	return range;
-}
-
-static void __init memblock_x86_subtract_reserved(struct range *range, int az)
-{
-	u64 final_start, final_end;
-	struct memblock_region *r;
-
-	/* Take out region array itself at first*/
-	memblock_free_reserved_regions();
-
-	memblock_dbg("Subtract (%ld early reservations)\n", memblock.reserved.cnt);
-
-	for_each_memblock(reserved, r) {
-		memblock_dbg("  [%010llx-%010llx]\n", (u64)r->base, (u64)r->base + r->size - 1);
-		final_start = PFN_DOWN(r->base);
-		final_end = PFN_UP(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		subtract_range(range, az, final_start, final_end);
-	}
-
-	/* Put region array back ? */
-	memblock_reserve_reserved_regions();
-}
-
-struct count_data {
-	int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
-				unsigned long end_pfn, void *datax)
-{
-	struct count_data *data = datax;
-
-	data->nr++;
-
-	return 0;
-}
-
-static int __init count_early_node_map(int nodeid)
-{
-	struct count_data data;
-
-	data.nr = 0;
-	work_with_active_regions(nodeid, count_work_fn, &data);
-
-	return data.nr;
-}
-
-int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
-			 unsigned long start_pfn, unsigned long end_pfn)
-{
-	int count;
-	struct range *range;
-	int nr_range;
-
-	count = (memblock.reserved.cnt + count_early_node_map(nodeid)) * 2;
-
-	range = find_range_array(count);
-	nr_range = 0;
-
-	/*
-	 * Use early_node_map[] and memblock.reserved.region to get range array
-	 * at first
-	 */
-	nr_range = add_from_early_node_map(range, count, nr_range, nodeid);
-	subtract_range(range, count, 0, start_pfn);
-	subtract_range(range, count, end_pfn, -1ULL);
-
-	memblock_x86_subtract_reserved(range, count);
-	nr_range = clean_sort_range(range, count);
-
-	*rangep = range;
-	return nr_range;
-}
-
-int __init get_free_all_memory_range(struct range **rangep, int nodeid)
-{
-	unsigned long end_pfn = -1UL;
-
-#ifdef CONFIG_X86_32
-	end_pfn = max_low_pfn;
-#endif
-	return __get_free_all_memory_range(rangep, nodeid, 0, end_pfn);
-}
-
-static u64 __init __memblock_x86_memory_in_range(u64 addr, u64 limit, bool get_free)
-{
-	int i, count;
-	struct range *range;
-	int nr_range;
-	u64 final_start, final_end;
-	u64 free_size;
-	struct memblock_region *r;
-
-	count = (memblock.reserved.cnt + memblock.memory.cnt) * 2;
-
-	range = find_range_array(count);
-	nr_range = 0;
-
-	addr = PFN_UP(addr);
-	limit = PFN_DOWN(limit);
-
-	for_each_memblock(memory, r) {
-		final_start = PFN_UP(r->base);
-		final_end = PFN_DOWN(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		if (final_start >= limit || final_end <= addr)
-			continue;
-
-		nr_range = add_range(range, count, nr_range, final_start, final_end);
-	}
-	subtract_range(range, count, 0, addr);
-	subtract_range(range, count, limit, -1ULL);
-
-	/* Subtract memblock.reserved.region in range ? */
-	if (!get_free)
-		goto sort_and_count_them;
-	for_each_memblock(reserved, r) {
-		final_start = PFN_DOWN(r->base);
-		final_end = PFN_UP(r->base + r->size);
-		if (final_start >= final_end)
-			continue;
-		if (final_start >= limit || final_end <= addr)
-			continue;
-
-		subtract_range(range, count, final_start, final_end);
-	}
-
-sort_and_count_them:
-	nr_range = clean_sort_range(range, count);
-
-	free_size = 0;
-	for (i = 0; i < nr_range; i++)
-		free_size += range[i].end - range[i].start;
-
-	return free_size << PAGE_SHIFT;
-}
-
-u64 __init memblock_x86_free_memory_in_range(u64 addr, u64 limit)
-{
-	return __memblock_x86_memory_in_range(addr, limit, true);
-}
-
-u64 __init memblock_x86_memory_in_range(u64 addr, u64 limit)
-{
-	return __memblock_x86_memory_in_range(addr, limit, false);
-}
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
-	memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("       memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
-	memblock_free(start, end - start);
-}
-
-/*
- * Need to call this function after memblock_x86_register_active_regions,
- * so early_node_map[] is filled already.
- */
-u64 __init memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align)
-{
-	u64 addr;
-	addr = find_memory_core_early(nid, size, align, start, end);
-	if (addr != MEMBLOCK_ERROR)
-		return addr;
-
-	/* Fallback, should already have start end within node range */
-	return memblock_find_in_range(start, end, size, align);
-}
-
-/*
- * Finds an active region in the address range from start_pfn to last_pfn and
- * returns its range in ei_startpfn and ei_endpfn for the memblock entry.
- */
-static int __init memblock_x86_find_active_region(const struct memblock_region *ei,
-				  unsigned long start_pfn,
-				  unsigned long last_pfn,
-				  unsigned long *ei_startpfn,
-				  unsigned long *ei_endpfn)
-{
-	u64 align = PAGE_SIZE;
-
-	*ei_startpfn = round_up(ei->base, align) >> PAGE_SHIFT;
-	*ei_endpfn = round_down(ei->base + ei->size, align) >> PAGE_SHIFT;
-
-	/* Skip map entries smaller than a page */
-	if (*ei_startpfn >= *ei_endpfn)
-		return 0;
-
-	/* Skip if map is outside the node */
-	if (*ei_endpfn <= start_pfn || *ei_startpfn >= last_pfn)
-		return 0;
-
-	/* Check for overlaps */
-	if (*ei_startpfn < start_pfn)
-		*ei_startpfn = start_pfn;
-	if (*ei_endpfn > last_pfn)
-		*ei_endpfn = last_pfn;
-
-	return 1;
-}
-
-/* Walk the memblock.memory map and register active regions within a node */
-void __init memblock_x86_register_active_regions(int nid, unsigned long start_pfn,
-					 unsigned long last_pfn)
-{
-	unsigned long ei_startpfn;
-	unsigned long ei_endpfn;
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r)
-		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			add_active_range(nid, ei_startpfn, ei_endpfn);
-}
-
-/*
- * Find the hole size (in bytes) in the memory range.
- * @start: starting address of the memory range to scan
- * @end: ending address of the memory range to scan
- */
-u64 __init memblock_x86_hole_size(u64 start, u64 end)
-{
-	unsigned long start_pfn = start >> PAGE_SHIFT;
-	unsigned long last_pfn = end >> PAGE_SHIFT;
-	unsigned long ei_startpfn, ei_endpfn, ram = 0;
-	struct memblock_region *r;
-
-	for_each_memblock(memory, r)
-		if (memblock_x86_find_active_region(r, start_pfn, last_pfn,
-					    &ei_startpfn, &ei_endpfn))
-			ram += ei_endpfn - ei_startpfn;
-
-	return end - start - ((u64)ram << PAGE_SHIFT);
-}
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 	       (unsigned long long) pattern,
 	       (unsigned long long) start_bad,
 	       (unsigned long long) end_bad);
-	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+	memblock_reserve(start_bad, end_bad - start_bad);
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
@@ -70,24 +70,19 @@ static void __init memtest(u64 pattern, u64 start_phys, u64 size)
 
 static void __init do_one_pass(u64 pattern, u64 start, u64 end)
 {
-	u64 size = 0;
+	u64 i;
+	phys_addr_t this_start, this_end;
 
-	while (start < end) {
-		start = memblock_x86_find_in_range_size(start, &size, 1);
-
-		/* done ? */
-		if (start >= end)
-			break;
-		if (start + size > end)
-			size = end - start;
-
-		printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
-		       (unsigned long long) start,
-		       (unsigned long long) start + size,
-		       (unsigned long long) cpu_to_be64(pattern));
-		memtest(pattern, start, size);
-
-		start += size;
+	for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+		this_start = clamp_t(phys_addr_t, this_start, start, end);
+		this_end = clamp_t(phys_addr_t, this_end, start, end);
+		if (this_start < this_end) {
+			printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
+			       (unsigned long long)this_start,
+			       (unsigned long long)this_end,
+			       (unsigned long long)cpu_to_be64(pattern));
+			memtest(pattern, this_start, this_end - this_start);
+		}
 	}
 }
 
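Note (not part of the patch): the hunk above replaces the x86-only memblock_x86_find_in_range_size() loop with the generic for_each_free_mem_range() iterator. Below is a minimal sketch of the same pattern on its own, assuming an early-boot (__init) caller after memblock has been populated; the helper name and messages are invented for illustration.

	#include <linux/kernel.h>
	#include <linux/memblock.h>

	/* sketch: walk free (memory && !reserved) ranges and clip to a window */
	static void __init show_free_ranges(phys_addr_t win_start, phys_addr_t win_end)
	{
		phys_addr_t this_start, this_end;
		u64 i;

		for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
			this_start = clamp_t(phys_addr_t, this_start, win_start, win_end);
			this_end = clamp_t(phys_addr_t, this_end, win_start, win_end);
			if (this_start < this_end)
				pr_info("free: %010llx - %010llx\n",
					(unsigned long long)this_start,
					(unsigned long long)this_end);
		}
	}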
@@ -192,8 +192,6 @@ int __init numa_add_memblk(int nid, u64 start, u64 end)
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start, u64 end)
 {
-	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
-	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
 	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
 	bool remapped = false;
 	u64 nd_pa;
@@ -224,17 +222,12 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
 		nd_pa = __pa(nd);
 		remapped = true;
 	} else {
-		nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR)
-			nd_pa = memblock_find_in_range(nd_low, nd_high,
-						nd_size, SMP_CACHE_BYTES);
-		if (nd_pa == MEMBLOCK_ERROR) {
+		nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
+		if (!nd_pa) {
 			pr_err("Cannot find %zu bytes in node %d\n",
 			       nd_size, nid);
 			return;
 		}
-		memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");
 		nd = __va(nd_pa);
 	}
 
@@ -371,8 +364,7 @@ void __init numa_reset_distance(void)
 
 	/* numa_distance could be 1LU marking allocation failure, test cnt */
 	if (numa_distance_cnt)
-		memblock_x86_free_range(__pa(numa_distance),
-					__pa(numa_distance) + size);
+		memblock_free(__pa(numa_distance), size);
 	numa_distance_cnt = 0;
 	numa_distance = NULL;	/* enable table creation */
 }
@@ -395,13 +387,13 @@ static int __init numa_alloc_distance(void)
 
 	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 				      size, PAGE_SIZE);
-	if (phys == MEMBLOCK_ERROR) {
+	if (!phys) {
 		pr_warning("NUMA: Warning: can't allocate distance table!\n");
 		/* don't retry until explicitly reset */
 		numa_distance = (void *)1LU;
 		return -ENOMEM;
 	}
-	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+	memblock_reserve(phys, size);
 
 	numa_distance = __va(phys);
 	numa_distance_cnt = cnt;
@@ -482,8 +474,8 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 			numaram = 0;
 	}
 
-	e820ram = max_pfn - (memblock_x86_hole_size(0,
-					PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
+	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+
 	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
 	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
 		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
@@ -505,13 +497,10 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
-	for (i = 0; i < mi->nr_blks; i++)
-		memblock_x86_register_active_regions(mi->blk[i].nid,
-					mi->blk[i].start >> PAGE_SHIFT,
-					mi->blk[i].end >> PAGE_SHIFT);
-
-	/* for out of order entries */
-	sort_node_map();
+	for (i = 0; i < mi->nr_blks; i++) {
+		struct numa_memblk *mb = &mi->blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+	}
 
 	/*
 	 * If sections array is gonna be used for pfn -> nid mapping, check
@@ -545,6 +534,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 		setup_node_data(nid, start, end);
 	}
 
+	/* Dump memblock with node info and return. */
+	memblock_dump_all();
 	return 0;
 }
 
@@ -582,7 +573,7 @@ static int __init numa_init(int (*init_func)(void))
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
-	remove_all_active_ranges();
+	WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
 	numa_reset_distance();
 
 	ret = init_func();
@@ -199,23 +199,23 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
 
 	/* allocate node memory and the lowmem remap area */
 	node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES);
-	if (node_pa == MEMBLOCK_ERROR) {
+	if (!node_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n",
 			   size, nid);
 		return;
 	}
-	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+	memblock_reserve(node_pa, size);
 
 	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
 					  max_low_pfn << PAGE_SHIFT,
 					  size, LARGE_PAGE_BYTES);
-	if (remap_pa == MEMBLOCK_ERROR) {
+	if (!remap_pa) {
 		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
 			   size, nid);
-		memblock_x86_free_range(node_pa, node_pa + size);
+		memblock_free(node_pa, size);
 		return;
 	}
-	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+	memblock_reserve(remap_pa, size);
 	remap_va = phys_to_virt(remap_pa);
 
 	/* perform actual remap */
@@ -19,7 +19,7 @@ unsigned long __init numa_free_all_bootmem(void)
 	for_each_online_node(i)
 		pages += free_all_bootmem_node(NODE_DATA(i));
 
-	pages += free_all_memory_core_early(MAX_NUMNODES);
+	pages += free_low_memory_core_early(MAX_NUMNODES);
 
 	return pages;
 }
@@ -28,6 +28,16 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
 	return -ENOENT;
 }
 
+static u64 mem_hole_size(u64 start, u64 end)
+{
+	unsigned long start_pfn = PFN_UP(start);
+	unsigned long end_pfn = PFN_DOWN(end);
+
+	if (start_pfn < end_pfn)
+		return PFN_PHYS(absent_pages_in_range(start_pfn, end_pfn));
+	return 0;
+}
+
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
@@ -89,7 +99,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 * Calculate target node size.  x86_32 freaks on __udivdi3() so do
 	 * the division in ulong number of pages and convert back.
 	 */
-	size = max_addr - addr - memblock_x86_hole_size(addr, max_addr);
+	size = max_addr - addr - mem_hole_size(addr, max_addr);
 	size = PFN_PHYS((unsigned long)(size >> PAGE_SHIFT) / nr_nodes);
 
 	/*
@@ -135,8 +145,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * Continue to add memory to this fake node if its
 		 * non-reserved memory is less than the per-node size.
 		 */
-		while (end - start -
-			memblock_x86_hole_size(start, end) < size) {
+		while (end - start - mem_hole_size(start, end) < size) {
 			end += FAKE_NODE_MIN_SIZE;
 			if (end > limit) {
 				end = limit;
@@ -150,7 +159,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -158,8 +167,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
@@ -180,7 +188,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
 {
 	u64 end = start + size;
 
-	while (end - start - memblock_x86_hole_size(start, end) < size) {
+	while (end - start - mem_hole_size(start, end) < size) {
 		end += FAKE_NODE_MIN_SIZE;
 		if (end > max_addr) {
 			end = max_addr;
@@ -211,8 +219,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	 * creates a uniform distribution of node sizes across the entire
 	 * machine (but not necessarily over physical nodes).
 	 */
-	min_size = (max_addr - addr - memblock_x86_hole_size(addr, max_addr)) /
-						MAX_NUMNODES;
+	min_size = (max_addr - addr - mem_hole_size(addr, max_addr)) / MAX_NUMNODES;
 	min_size = max(min_size, FAKE_NODE_MIN_SIZE);
 	if ((min_size & FAKE_NODE_MIN_HASH_MASK) < min_size)
 		min_size = (min_size + FAKE_NODE_MIN_SIZE) &
@@ -252,7 +259,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * this one must extend to the boundary.
 		 */
 		if (end < dma32_end && dma32_end - end -
-		    memblock_x86_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
+		    mem_hole_size(end, dma32_end) < FAKE_NODE_MIN_SIZE)
 			end = dma32_end;
 
 		/*
@@ -260,8 +267,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 		 * next node, this one must extend to the end of the
 		 * physical node.
 		 */
-		if (limit - end -
-		    memblock_x86_hole_size(end, limit) < size)
+		if (limit - end - mem_hole_size(end, limit) < size)
 			end = limit;
 
 		ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
@@ -351,11 +357,11 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 
 		phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
 					      phys_size, PAGE_SIZE);
-		if (phys == MEMBLOCK_ERROR) {
+		if (!phys) {
 			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
 			goto no_emu;
 		}
-		memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+		memblock_reserve(phys, phys_size);
 		phys_dist = __va(phys);
 
 		for (i = 0; i < numa_dist_cnt; i++)
@@ -424,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 
 	/* free the copied physical distance table */
 	if (phys_dist)
-		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+		memblock_free(__pa(phys_dist), phys_size);
 	return;
 
 no_emu:
@@ -352,8 +352,7 @@ void __init efi_memblock_x86_reserve_range(void)
 		boot_params.efi_info.efi_memdesc_size;
 	memmap.desc_version = boot_params.efi_info.efi_memdesc_version;
 	memmap.desc_size = boot_params.efi_info.efi_memdesc_size;
-	memblock_x86_reserve_range(pmap, pmap + memmap.nr_map * memmap.desc_size,
-		      "EFI memmap");
+	memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
 }
 
 #if EFI_DEBUG
@@ -397,16 +396,14 @@ void __init efi_reserve_boot_services(void)
 		if ((start+size >= virt_to_phys(_text)
 				&& start <= virt_to_phys(_end)) ||
 			!e820_all_mapped(start, start+size, E820_RAM) ||
-			memblock_x86_check_reserved_size(&start, &size,
-							1<<EFI_PAGE_SHIFT)) {
+			memblock_is_region_reserved(start, size)) {
 			/* Could not reserve, skip it */
 			md->num_pages = 0;
 			memblock_dbg(PFX "Could not reserve boot range "
 					"[0x%010llx-0x%010llx]\n",
 						start, start+size-1);
 		} else
-			memblock_x86_reserve_range(start, start+size,
-							"EFI Boot");
+			memblock_reserve(start, size);
 	}
 }
 
@@ -1215,8 +1215,6 @@ asmlinkage void __init xen_start_kernel(void)
 	local_irq_disable();
 	early_boot_irqs_disabled = true;
 
-	memblock_init();
-
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 	xen_ident_map_ISA();
@@ -1774,10 +1774,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 	__xen_write_cr3(true, __pa(pgd));
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE);
 
 	return pgd;
 }
@@ -1853,10 +1851,8 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
 			  PFN_DOWN(__pa(initial_page_table)));
 	xen_write_cr3(__pa(initial_page_table));
 
-	memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
-		      __pa(xen_start_info->pt_base +
-			   xen_start_info->nr_pt_frames * PAGE_SIZE),
-		      "XEN PAGETABLES");
+	memblock_reserve(__pa(xen_start_info->pt_base),
+			 xen_start_info->nr_pt_frames * PAGE_SIZE));
 
 	return initial_page_table;
 }
@@ -75,7 +75,7 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
 		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
-	memblock_x86_reserve_range(start, start + size, "XEN EXTRA");
+	memblock_reserve(start, size);
 
 	xen_max_p2m_pfn = PFN_DOWN(start + size);
 
@@ -311,9 +311,8 @@ char * __init xen_memory_setup(void)
 	 *  - xen_start_info
 	 * See comment above "struct start_info" in <xen/interface/xen.h>
 	 */
-	memblock_x86_reserve_range(__pa(xen_start_info->mfn_list),
-		      __pa(xen_start_info->pt_base),
-			"XEN START INFO");
+	memblock_reserve(__pa(xen_start_info->mfn_list),
+			 xen_start_info->pt_base - xen_start_info->mfn_list);
 
 	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
 
@@ -41,6 +41,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/pci-ats.h>
+#include <linux/memblock.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 
@@ -2188,18 +2189,6 @@ static inline void iommu_prepare_isa(void)
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
-static int __init si_domain_work_fn(unsigned long start_pfn,
-				    unsigned long end_pfn, void *datax)
-{
-	int *ret = datax;
-
-	*ret = iommu_domain_identity_map(si_domain,
-					 (uint64_t)start_pfn << PAGE_SHIFT,
-					 (uint64_t)end_pfn << PAGE_SHIFT);
-	return *ret;
-
-}
-
 static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2231,9 +2220,15 @@ static int __init si_domain_init(int hw)
 		return 0;
 
 	for_each_online_node(nid) {
-		work_with_active_regions(nid, si_domain_work_fn, &ret);
-		if (ret)
-			return ret;
+		unsigned long start_pfn, end_pfn;
+		int i;
+
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+			ret = iommu_domain_identity_map(si_domain,
+					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
|
@ -44,7 +44,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
|
|||||||
unsigned long endpfn);
|
unsigned long endpfn);
|
||||||
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
|
extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
|
||||||
|
|
||||||
unsigned long free_all_memory_core_early(int nodeid);
|
extern unsigned long free_low_memory_core_early(int nodeid);
|
||||||
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
|
extern unsigned long free_all_bootmem_node(pg_data_t *pgdat);
|
||||||
extern unsigned long free_all_bootmem(void);
|
extern unsigned long free_all_bootmem(void);
|
||||||
|
|
||||||
|
@ -2,8 +2,6 @@
|
|||||||
#define _LINUX_MEMBLOCK_H
|
#define _LINUX_MEMBLOCK_H
|
||||||
#ifdef __KERNEL__
|
#ifdef __KERNEL__
|
||||||
|
|
||||||
#define MEMBLOCK_ERROR 0
|
|
||||||
|
|
||||||
#ifdef CONFIG_HAVE_MEMBLOCK
|
#ifdef CONFIG_HAVE_MEMBLOCK
|
||||||
/*
|
/*
|
||||||
* Logical memory blocks.
|
* Logical memory blocks.
|
||||||
@ -19,81 +17,161 @@
|
|||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/mm.h>
|
#include <linux/mm.h>
|
||||||
|
|
||||||
#include <asm/memblock.h>
|
|
||||||
|
|
||||||
#define INIT_MEMBLOCK_REGIONS 128
|
#define INIT_MEMBLOCK_REGIONS 128
|
||||||
|
|
||||||
struct memblock_region {
|
struct memblock_region {
|
||||||
phys_addr_t base;
|
phys_addr_t base;
|
||||||
phys_addr_t size;
|
phys_addr_t size;
|
||||||
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
||||||
|
int nid;
|
||||||
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
struct memblock_type {
|
struct memblock_type {
|
||||||
unsigned long cnt; /* number of regions */
|
unsigned long cnt; /* number of regions */
|
||||||
unsigned long max; /* size of the allocated array */
|
unsigned long max; /* size of the allocated array */
|
||||||
|
phys_addr_t total_size; /* size of all regions */
|
||||||
struct memblock_region *regions;
|
struct memblock_region *regions;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct memblock {
|
struct memblock {
|
||||||
phys_addr_t current_limit;
|
phys_addr_t current_limit;
|
||||||
phys_addr_t memory_size; /* Updated by memblock_analyze() */
|
|
||||||
struct memblock_type memory;
|
struct memblock_type memory;
|
||||||
struct memblock_type reserved;
|
struct memblock_type reserved;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern struct memblock memblock;
|
extern struct memblock memblock;
|
||||||
extern int memblock_debug;
|
extern int memblock_debug;
|
||||||
extern int memblock_can_resize;
|
|
||||||
|
|
||||||
#define memblock_dbg(fmt, ...) \
|
#define memblock_dbg(fmt, ...) \
|
||||||
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
|
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
|
||||||
|
|
||||||
u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align);
|
phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
|
||||||
|
phys_addr_t size, phys_addr_t align, int nid);
|
||||||
|
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
|
||||||
|
phys_addr_t size, phys_addr_t align);
|
||||||
int memblock_free_reserved_regions(void);
|
int memblock_free_reserved_regions(void);
|
||||||
int memblock_reserve_reserved_regions(void);
|
int memblock_reserve_reserved_regions(void);
|
||||||
|
|
||||||
extern void memblock_init(void);
|
void memblock_allow_resize(void);
|
||||||
extern void memblock_analyze(void);
|
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
|
||||||
extern long memblock_add(phys_addr_t base, phys_addr_t size);
|
int memblock_add(phys_addr_t base, phys_addr_t size);
|
||||||
extern long memblock_remove(phys_addr_t base, phys_addr_t size);
|
int memblock_remove(phys_addr_t base, phys_addr_t size);
|
||||||
extern long memblock_free(phys_addr_t base, phys_addr_t size);
|
int memblock_free(phys_addr_t base, phys_addr_t size);
|
||||||
extern long memblock_reserve(phys_addr_t base, phys_addr_t size);
|
int memblock_reserve(phys_addr_t base, phys_addr_t size);
|
||||||
|
|
||||||
/* The numa aware allocator is only available if
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
||||||
* CONFIG_ARCH_POPULATES_NODE_MAP is set
|
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
|
||||||
|
unsigned long *out_end_pfn, int *out_nid);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* for_each_mem_pfn_range - early memory pfn range iterator
|
||||||
|
* @i: an integer used as loop variable
|
||||||
|
* @nid: node selector, %MAX_NUMNODES for all nodes
|
||||||
|
* @p_start: ptr to ulong for start pfn of the range, can be %NULL
|
||||||
|
* @p_end: ptr to ulong for end pfn of the range, can be %NULL
|
||||||
|
* @p_nid: ptr to int for nid of the range, can be %NULL
|
||||||
|
*
|
||||||
|
* Walks over configured memory ranges. Available after early_node_map is
|
||||||
|
* populated.
|
||||||
*/
|
*/
|
||||||
extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align,
|
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \
|
||||||
int nid);
|
for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
|
||||||
extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
|
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
|
||||||
int nid);
|
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
||||||
|
|
||||||
extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
|
void __next_free_mem_range(u64 *idx, int nid, phys_addr_t *out_start,
|
||||||
|
phys_addr_t *out_end, int *out_nid);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* for_each_free_mem_range - iterate through free memblock areas
|
||||||
|
* @i: u64 used as loop variable
|
||||||
|
* @nid: node selector, %MAX_NUMNODES for all nodes
|
||||||
|
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
|
||||||
|
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
|
||||||
|
* @p_nid: ptr to int for nid of the range, can be %NULL
|
||||||
|
*
|
||||||
|
* Walks over free (memory && !reserved) areas of memblock. Available as
|
||||||
|
* soon as memblock is initialized.
|
||||||
|
*/
|
||||||
|
#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid) \
|
||||||
|
for (i = 0, \
|
||||||
|
__next_free_mem_range(&i, nid, p_start, p_end, p_nid); \
|
||||||
|
i != (u64)ULLONG_MAX; \
|
||||||
|
__next_free_mem_range(&i, nid, p_start, p_end, p_nid))
|
||||||
|
|
||||||
|
void __next_free_mem_range_rev(u64 *idx, int nid, phys_addr_t *out_start,
|
||||||
|
phys_addr_t *out_end, int *out_nid);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* for_each_free_mem_range_reverse - rev-iterate through free memblock areas
|
||||||
|
* @i: u64 used as loop variable
|
||||||
|
* @nid: node selector, %MAX_NUMNODES for all nodes
|
||||||
|
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
|
||||||
|
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
|
||||||
|
* @p_nid: ptr to int for nid of the range, can be %NULL
|
||||||
|
*
|
||||||
|
* Walks over free (memory && !reserved) areas of memblock in reverse
|
||||||
|
* order. Available as soon as memblock is initialized.
|
||||||
|
*/
|
||||||
|
#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid) \
|
||||||
|
for (i = (u64)ULLONG_MAX, \
|
||||||
|
__next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid); \
|
||||||
|
i != (u64)ULLONG_MAX; \
|
||||||
|
__next_free_mem_range_rev(&i, nid, p_start, p_end, p_nid))
|
||||||
|
|
||||||
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
||||||
|
int memblock_set_node(phys_addr_t base, phys_addr_t size, int nid);
|
||||||
|
|
||||||
|
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
|
||||||
|
{
|
||||||
|
r->nid = nid;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int memblock_get_region_node(const struct memblock_region *r)
|
||||||
|
{
|
||||||
|
return r->nid;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int memblock_get_region_node(const struct memblock_region *r)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
||||||
|
|
||||||
|
phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
|
||||||
|
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
|
||||||
|
|
||||||
|
phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
|
||||||
|
|
||||||
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
|
/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
|
||||||
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
|
#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
|
||||||
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
|
#define MEMBLOCK_ALLOC_ACCESSIBLE 0
|
||||||
|
|
||||||
extern phys_addr_t memblock_alloc_base(phys_addr_t size,
|
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
|
||||||
phys_addr_t align,
|
phys_addr_t max_addr);
|
||||||
phys_addr_t max_addr);
|
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
|
||||||
extern phys_addr_t __memblock_alloc_base(phys_addr_t size,
|
phys_addr_t max_addr);
|
||||||
phys_addr_t align,
|
phys_addr_t memblock_phys_mem_size(void);
|
||||||
phys_addr_t max_addr);
|
phys_addr_t memblock_start_of_DRAM(void);
|
||||||
extern phys_addr_t memblock_phys_mem_size(void);
|
phys_addr_t memblock_end_of_DRAM(void);
|
||||||
extern phys_addr_t memblock_start_of_DRAM(void);
|
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
|
||||||
extern phys_addr_t memblock_end_of_DRAM(void);
|
int memblock_is_memory(phys_addr_t addr);
|
||||||
extern void memblock_enforce_memory_limit(phys_addr_t memory_limit);
|
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
|
||||||
extern int memblock_is_memory(phys_addr_t addr);
|
int memblock_is_reserved(phys_addr_t addr);
|
||||||
extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
|
int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
|
||||||
extern int memblock_is_reserved(phys_addr_t addr);
|
|
||||||
extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
|
|
||||||
|
|
||||||
extern void memblock_dump_all(void);
|
extern void __memblock_dump_all(void);
|
||||||
|
|
||||||
/* Provided by the architecture */
|
static inline void memblock_dump_all(void)
|
||||||
extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid);
|
{
|
||||||
extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
|
if (memblock_debug)
|
||||||
phys_addr_t addr2, phys_addr_t size2);
|
__memblock_dump_all();
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* memblock_set_current_limit - Set the current allocation limit to allow
|
* memblock_set_current_limit - Set the current allocation limit to allow
|
||||||
@ -101,7 +179,7 @@ extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
|
|||||||
* accessible during boot
|
* accessible during boot
|
||||||
* @limit: New limit value (physical address)
|
* @limit: New limit value (physical address)
|
||||||
*/
|
*/
|
||||||
extern void memblock_set_current_limit(phys_addr_t limit);
|
void memblock_set_current_limit(phys_addr_t limit);
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -154,9 +232,9 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
|
|||||||
region++)
|
region++)
|
||||||
|
|
||||||
|
|
||||||
#ifdef ARCH_DISCARD_MEMBLOCK
|
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
|
||||||
#define __init_memblock __init
|
#define __init_memblock __meminit
|
||||||
#define __initdata_memblock __initdata
|
#define __initdata_memblock __meminitdata
|
||||||
#else
|
#else
|
||||||
#define __init_memblock
|
#define __init_memblock
|
||||||
#define __initdata_memblock
|
#define __initdata_memblock
|
||||||
@ -165,7 +243,7 @@ static inline unsigned long memblock_region_reserved_end_pfn(const struct memblo
|
|||||||
#else
|
#else
|
||||||
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
|
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
|
||||||
{
|
{
|
||||||
return MEMBLOCK_ERROR;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_HAVE_MEMBLOCK */
|
#endif /* CONFIG_HAVE_MEMBLOCK */
|
||||||
|
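Note, not part of the patch: the kernel-doc above describes the new memblock iterators. A minimal sketch of how they are typically used from early (__init) code, assuming memblock has been populated and, for the pfn iterator, that CONFIG_HAVE_MEMBLOCK_NODE_MAP is selected; the function name and the printed messages are invented for illustration.

	#include <linux/kernel.h>
	#include <linux/memblock.h>

	static void __init dump_memblock_ranges(void)
	{
		phys_addr_t start, end;
		unsigned long spfn, epfn;
		int nid, j;
		u64 i;

		/* free (memory && !reserved) areas, highest address first, with owning node */
		for_each_free_mem_range_reverse(i, MAX_NUMNODES, &start, &end, &nid)
			pr_info("free [%#010llx-%#010llx] nid %d\n",
				(unsigned long long)start,
				(unsigned long long)end, nid);

		/* registered memory of node 0, as page frame numbers */
		for_each_mem_pfn_range(j, 0, &spfn, &epfn, NULL)
			pr_info("node 0 pfn [%lx-%lx)\n", spfn, epfn);
	}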
@ -1253,41 +1253,34 @@ static inline void pgtable_page_dtor(struct page *page)
|
|||||||
extern void free_area_init(unsigned long * zones_size);
|
extern void free_area_init(unsigned long * zones_size);
|
||||||
extern void free_area_init_node(int nid, unsigned long * zones_size,
|
extern void free_area_init_node(int nid, unsigned long * zones_size,
|
||||||
unsigned long zone_start_pfn, unsigned long *zholes_size);
|
unsigned long zone_start_pfn, unsigned long *zholes_size);
|
||||||
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
|
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
|
||||||
/*
|
/*
|
||||||
* With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
|
* With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
|
||||||
* zones, allocate the backing mem_map and account for memory holes in a more
|
* zones, allocate the backing mem_map and account for memory holes in a more
|
||||||
* architecture independent manner. This is a substitute for creating the
|
* architecture independent manner. This is a substitute for creating the
|
||||||
* zone_sizes[] and zholes_size[] arrays and passing them to
|
* zone_sizes[] and zholes_size[] arrays and passing them to
|
||||||
* free_area_init_node()
|
* free_area_init_node()
|
||||||
*
|
*
|
||||||
* An architecture is expected to register range of page frames backed by
|
* An architecture is expected to register range of page frames backed by
|
||||||
* physical memory with add_active_range() before calling
|
* physical memory with memblock_add[_node]() before calling
|
||||||
* free_area_init_nodes() passing in the PFN each zone ends at. At a basic
|
* free_area_init_nodes() passing in the PFN each zone ends at. At a basic
|
||||||
* usage, an architecture is expected to do something like
|
* usage, an architecture is expected to do something like
|
||||||
*
|
*
|
||||||
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
|
* unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
|
||||||
* max_highmem_pfn};
|
* max_highmem_pfn};
|
||||||
* for_each_valid_physical_page_range()
|
* for_each_valid_physical_page_range()
|
||||||
* add_active_range(node_id, start_pfn, end_pfn)
|
* memblock_add_node(base, size, nid)
|
||||||
* free_area_init_nodes(max_zone_pfns);
|
* free_area_init_nodes(max_zone_pfns);
|
||||||
*
|
*
|
||||||
* If the architecture guarantees that there are no holes in the ranges
|
* free_bootmem_with_active_regions() calls free_bootmem_node() for each
|
||||||
* registered with add_active_range(), free_bootmem_active_regions()
|
* registered physical page range. Similarly
|
||||||
* will call free_bootmem_node() for each registered physical page range.
|
* sparse_memory_present_with_active_regions() calls memory_present() for
|
||||||
* Similarly sparse_memory_present_with_active_regions() calls
|
* each range when SPARSEMEM is enabled.
|
||||||
* memory_present() for each range when SPARSEMEM is enabled.
|
|
||||||
*
|
*
|
||||||
* See mm/page_alloc.c for more information on each function exposed by
|
* See mm/page_alloc.c for more information on each function exposed by
|
||||||
* CONFIG_ARCH_POPULATES_NODE_MAP
|
* CONFIG_HAVE_MEMBLOCK_NODE_MAP.
|
||||||
*/
|
*/
|
||||||
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
|
extern void free_area_init_nodes(unsigned long *max_zone_pfn);
|
||||||
extern void add_active_range(unsigned int nid, unsigned long start_pfn,
|
|
||||||
unsigned long end_pfn);
|
|
||||||
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
|
|
||||||
unsigned long end_pfn);
|
|
||||||
extern void remove_all_active_ranges(void);
|
|
||||||
void sort_node_map(void);
|
|
||||||
unsigned long node_map_pfn_alignment(void);
|
unsigned long node_map_pfn_alignment(void);
|
||||||
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
|
unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
|
||||||
unsigned long end_pfn);
|
unsigned long end_pfn);
|
||||||
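The comment above describes the new arch-side contract. As a purely illustrative sketch (the bank address, size, node id and the helper name below are invented for this note, not taken from the patch), an architecture that selects HAVE_MEMBLOCK_NODE_MAP would do roughly:

/* hypothetical example: register RAM with memblock, then let the core
 * derive zone sizes and memory holes from the registered node map */
static void __init example_arch_bootmem_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* one made-up bank: 256MB of RAM at 64MB, all on node 0 */
	memblock_add_node(0x04000000, 0x10000000, 0);

	max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(memblock_end_of_DRAM());
	free_area_init_nodes(max_zone_pfns);
}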
@@ -1300,14 +1293,11 @@ extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 int add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid);
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
-#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
 static inline int __early_pfn_to_nid(unsigned long pfn)
 {
@@ -598,13 +598,13 @@ struct zonelist {
 #endif
 };
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 struct node_active_region {
 	unsigned long start_pfn;
 	unsigned long end_pfn;
 	int nid;
 };
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #ifndef CONFIG_DISCONTIGMEM
 /* The array of struct pages - for discontigmem use pgdat->lmem_map */
@@ -720,7 +720,7 @@ extern int movable_zone;
 
 static inline int zone_movable_is_highmem(void)
 {
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
 	return movable_zone == ZONE_HIGHMEM;
 #else
 	return 0;
@@ -938,7 +938,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
-	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
 static inline unsigned long early_pfn_to_nid(unsigned long pfn)
 {
 	return 0;
@@ -40,12 +40,6 @@
 #define RED_INACTIVE	0x09F911029D74E35BULL	/* when obj is inactive */
 #define RED_ACTIVE	0xD84156C5635688C0ULL	/* when obj is active */
 
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
-#define MEMBLOCK_INACTIVE	0x3a84fb0144c9e71bULL
-#else
-#define MEMBLOCK_INACTIVE	0x44c9e71bUL
-#endif
-
 #define SLUB_RED_INACTIVE	0xbb
 #define SLUB_RED_ACTIVE		0xcc
 
@@ -199,7 +199,7 @@ void __init setup_log_buf(int early)
 		unsigned long mem;
 
 		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
-		if (mem == MEMBLOCK_ERROR)
+		if (!mem)
 			return;
 		new_log_buf = __va(mem);
 	} else {
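With the MEMBLOCK_ERROR sentinel gone, early allocations report failure by returning 0, which is what the rewritten check above relies on. A minimal illustrative sketch of the convention (the size and message are made up, not part of this patch):

/* illustrative only: 0, not MEMBLOCK_ERROR, now signals allocation failure */
phys_addr_t pa = memblock_alloc(64 * 1024, PAGE_SIZE);
if (!pa)
	pr_warn("example: no room for a 64K early buffer\n");
else
	memset(__va(pa), 0, 64 * 1024);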
@@ -131,6 +131,12 @@ config SPARSEMEM_VMEMMAP
 config HAVE_MEMBLOCK
 	boolean
 
+config HAVE_MEMBLOCK_NODE_MAP
+	boolean
+
+config ARCH_DISCARD_MEMBLOCK
+	boolean
+
 config NO_BOOTMEM
 	boolean
 
mm/memblock.c: 1057 lines changed (file diff suppressed because it is too large; not shown here)
@@ -41,14 +41,13 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 	if (limit > memblock.current_limit)
 		limit = memblock.current_limit;
 
-	addr = find_memory_core_early(nid, size, align, goal, limit);
-
-	if (addr == MEMBLOCK_ERROR)
+	addr = memblock_find_in_range_node(goal, limit, size, align, nid);
+	if (!addr)
 		return NULL;
 
 	ptr = phys_to_virt(addr);
 	memset(ptr, 0, size);
-	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
+	memblock_reserve(addr, size);
 	/*
 	 * The min_count is set to 0 so that bootmem allocated blocks
 	 * are never reported as leaks.
@@ -107,23 +106,27 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
 		__free_pages_bootmem(pfn_to_page(i), 0);
 }
 
-unsigned long __init free_all_memory_core_early(int nodeid)
+unsigned long __init free_low_memory_core_early(int nodeid)
 {
-	int i;
-	u64 start, end;
 	unsigned long count = 0;
-	struct range *range = NULL;
-	int nr_range;
+	phys_addr_t start, end;
+	u64 i;
 
-	nr_range = get_free_all_memory_range(&range, nodeid);
+	/* free reserved array temporarily so that it's treated as free area */
+	memblock_free_reserved_regions();
 
-	for (i = 0; i < nr_range; i++) {
-		start = range[i].start;
-		end = range[i].end;
-		count += end - start;
-		__free_pages_memory(start, end);
+	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+		unsigned long start_pfn = PFN_UP(start);
+		unsigned long end_pfn = min_t(unsigned long,
+					      PFN_DOWN(end), max_low_pfn);
+		if (start_pfn < end_pfn) {
+			__free_pages_memory(start_pfn, end_pfn);
+			count += end_pfn - start_pfn;
+		}
 	}
 
+	/* put region array back? */
+	memblock_reserve_reserved_regions();
 	return count;
 }
 
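Note that for_each_free_mem_range() hands back byte addresses rather than page frames, which is why the rewritten loop rounds inward with PFN_UP()/PFN_DOWN() so that only wholly free pages are released. A tiny made-up example, assuming 4K pages:

/* hypothetical free range [0x1800, 0x5800) */
unsigned long start_pfn = PFN_UP(0x1800);	/* 2: rounds up past the partial page */
unsigned long end_pfn = PFN_DOWN(0x5800);	/* 5: rounds down */
/* pfns 2, 3 and 4 (three whole pages) are freed; the partial ends are skipped */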
@@ -137,7 +140,7 @@ unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
 	register_page_bootmem_info_node(pgdat);
 
-	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
+	/* free_low_memory_core_early(MAX_NUMNODES) will be called later */
 	return 0;
 }
 
@@ -155,7 +158,7 @@ unsigned long __init free_all_bootmem(void)
 	 * Use MAX_NUMNODES will make sure all ranges in early_node_map[]
 	 * will be used instead of only Node0 related
 	 */
-	return free_all_memory_core_early(MAX_NUMNODES);
+	return free_low_memory_core_early(MAX_NUMNODES);
 }
 
 /**
@@ -172,7 +175,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 			      unsigned long size)
 {
 	kmemleak_free_part(__va(physaddr), size);
-	memblock_x86_free_range(physaddr, physaddr + size);
+	memblock_free(physaddr, size);
 }
 
 /**
@@ -187,7 +190,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 void __init free_bootmem(unsigned long addr, unsigned long size)
 {
 	kmemleak_free_part(__va(addr), size);
-	memblock_x86_free_range(addr, addr + size);
+	memblock_free(addr, size);
 }
 
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
mm/page_alloc.c: 506 lines changed
@@ -181,39 +181,17 @@ static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-  /*
-   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
-   * ranges of memory (RAM) that may be registered with add_active_range().
-   * Ranges passed to add_active_range() will be merged if possible
-   * so the number of times add_active_range() can be called is
-   * related to the number of nodes and the number of holes
-   */
-  #ifdef CONFIG_MAX_ACTIVE_REGIONS
-    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
-    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
-  #else
-    #if MAX_NUMNODES >= 32
-      /* If there can be many nodes, allow up to 50 holes per node */
-      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
-    #else
-      /* By default, allow up to 256 distinct regions */
-      #define MAX_ACTIVE_REGIONS 256
-    #endif
-  #endif
-
-  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
-  static int __meminitdata nr_nodemap_entries;
-  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
-  static unsigned long __initdata required_kernelcore;
-  static unsigned long __initdata required_movablecore;
-  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-
-  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
-  int movable_zone;
-  EXPORT_SYMBOL(movable_zone);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+static unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_movablecore;
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+
+/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+int movable_zone;
+EXPORT_SYMBOL(movable_zone);
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
@@ -706,10 +684,10 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	int loop;
 
 	prefetchw(page);
-	for (loop = 0; loop < BITS_PER_LONG; loop++) {
+	for (loop = 0; loop < (1 << order); loop++) {
 		struct page *p = &page[loop];
 
-		if (loop + 1 < BITS_PER_LONG)
+		if (loop + 1 < (1 << order))
 			prefetchw(p + 1);
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
@@ -3737,35 +3715,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 	return 0;
 }
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
-/*
- * Basic iterator support. Return the first range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns first region regardless of node
- */
-static int __meminit first_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = 0; i < nr_nodemap_entries; i++)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit next_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index + 1; index < nr_nodemap_entries; index++)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
@@ -3775,15 +3725,12 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  */
 int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		unsigned long start_pfn = early_node_map[i].start_pfn;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
-
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 		if (start_pfn <= pfn && pfn < end_pfn)
-			return early_node_map[i].nid;
-	}
+			return nid;
 	/* This is a memory hole */
 	return -1;
 }
@@ -3812,11 +3759,6 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 }
 #endif
 
-/* Basic iterator support to walk early_node_map[] */
-#define for_each_active_range_index_in_nid(i, nid) \
-	for (i = first_active_region_index_in_nid(nid); i != -1; \
-				i = next_active_region_index_in_nid(i, nid))
-
 /**
  * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
@@ -3826,122 +3768,34 @@ bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 * add_active_ranges() contain no holes and may be freed, this
 * this function may be used instead of calling free_bootmem() manually.
 */
-void __init free_bootmem_with_active_regions(int nid,
-						unsigned long max_low_pfn)
+void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-	for_each_active_range_index_in_nid(i, nid) {
-		unsigned long size_pages = 0;
-		unsigned long end_pfn = early_node_map[i].end_pfn;
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
+		start_pfn = min(start_pfn, max_low_pfn);
+		end_pfn = min(end_pfn, max_low_pfn);
 
-		if (early_node_map[i].start_pfn >= max_low_pfn)
-			continue;
-
-		if (end_pfn > max_low_pfn)
-			end_pfn = max_low_pfn;
-
-		size_pages = end_pfn - early_node_map[i].start_pfn;
-		free_bootmem_node(NODE_DATA(early_node_map[i].nid),
-				PFN_PHYS(early_node_map[i].start_pfn),
-				size_pages << PAGE_SHIFT);
+		if (start_pfn < end_pfn)
+			free_bootmem_node(NODE_DATA(this_nid),
+					  PFN_PHYS(start_pfn),
+					  (end_pfn - start_pfn) << PAGE_SHIFT);
 	}
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
-/*
- * Basic iterator support. Return the last range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns last region regardless of node
- */
-static int __meminit last_active_region_index_in_nid(int nid)
-{
-	int i;
-
-	for (i = nr_nodemap_entries - 1; i >= 0; i--)
-		if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
-			return i;
-
-	return -1;
-}
-
-/*
- * Basic iterator support. Return the previous active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardless of node
- */
-static int __meminit previous_active_region_index_in_nid(int index, int nid)
-{
-	for (index = index - 1; index >= 0; index--)
-		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
-			return index;
-
-	return -1;
-}
-
-#define for_each_active_range_index_in_nid_reverse(i, nid) \
-	for (i = last_active_region_index_in_nid(nid); i != -1; \
-				i = previous_active_region_index_in_nid(i, nid))
-
-u64 __init find_memory_core_early(int nid, u64 size, u64 align,
-					u64 goal, u64 limit)
-{
-	int i;
-
-	/* Need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid_reverse(i, nid) {
-		u64 addr;
-		u64 ei_start, ei_last;
-		u64 final_start, final_end;
-
-		ei_last = early_node_map[i].end_pfn;
-		ei_last <<= PAGE_SHIFT;
-		ei_start = early_node_map[i].start_pfn;
-		ei_start <<= PAGE_SHIFT;
-
-		final_start = max(ei_start, goal);
-		final_end = min(ei_last, limit);
-
-		if (final_start >= final_end)
-			continue;
-
-		addr = memblock_find_in_range(final_start, final_end, size, align);
-
-		if (addr == MEMBLOCK_ERROR)
-			continue;
-
-		return addr;
-	}
-
-	return MEMBLOCK_ERROR;
-}
-#endif
-
 int __init add_from_early_node_map(struct range *range, int az,
 				   int nr_range, int nid)
 {
+	unsigned long start_pfn, end_pfn;
 	int i;
-	u64 start, end;
 
 	/* need to go over early_node_map to find out good range for node */
-	for_each_active_range_index_in_nid(i, nid) {
-		start = early_node_map[i].start_pfn;
-		end = early_node_map[i].end_pfn;
-		nr_range = add_range(range, az, nr_range, start, end);
-	}
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
+		nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -3952,12 +3806,11 @@ void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 */
 void __init sparse_memory_present_with_active_regions(int nid)
 {
-	int i;
+	unsigned long start_pfn, end_pfn;
+	int i, this_nid;
 
-	for_each_active_range_index_in_nid(i, nid)
-		memory_present(early_node_map[i].nid,
-				early_node_map[i].start_pfn,
-				early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
+		memory_present(this_nid, start_pfn, end_pfn);
 }
 
 /**
@@ -3974,13 +3827,15 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __meminit get_pfn_range_for_nid(unsigned int nid,
 			unsigned long *start_pfn, unsigned long *end_pfn)
 {
+	unsigned long this_start_pfn, this_end_pfn;
 	int i;
 
 	*start_pfn = -1UL;
 	*end_pfn = 0;
 
-	for_each_active_range_index_in_nid(i, nid) {
-		*start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
-		*end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
+	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
+		*start_pfn = min(*start_pfn, this_start_pfn);
+		*end_pfn = max(*end_pfn, this_end_pfn);
 	}
 
 	if (*start_pfn == -1UL)
@@ -4083,46 +3938,16 @@ unsigned long __meminit __absent_pages_in_range(int nid,
 						unsigned long range_start_pfn,
 						unsigned long range_end_pfn)
 {
-	int i = 0;
-	unsigned long prev_end_pfn = 0, hole_pages = 0;
-	unsigned long start_pfn;
+	unsigned long nr_absent = range_end_pfn - range_start_pfn;
+	unsigned long start_pfn, end_pfn;
+	int i;
 
-	/* Find the end_pfn of the first active range of pfns in the node */
-	i = first_active_region_index_in_nid(nid);
-	if (i == -1)
-		return 0;
-
-	prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-
-	/* Account for ranges before physical memory on this node */
-	if (early_node_map[i].start_pfn > range_start_pfn)
-		hole_pages = prev_end_pfn - range_start_pfn;
-
-	/* Find all holes for the zone within the node */
-	for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
-
-		/* No need to continue if prev_end_pfn is outside the zone */
-		if (prev_end_pfn >= range_end_pfn)
-			break;
-
-		/* Make sure the end of the zone is not within the hole */
-		start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
-		prev_end_pfn = max(prev_end_pfn, range_start_pfn);
-
-		/* Update the hole size cound and move on */
-		if (start_pfn > range_start_pfn) {
-			BUG_ON(prev_end_pfn > start_pfn);
-			hole_pages += start_pfn - prev_end_pfn;
-		}
-		prev_end_pfn = early_node_map[i].end_pfn;
+	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+		nr_absent -= end_pfn - start_pfn;
 	}
-
-	/* Account for ranges past physical memory on this node */
-	if (range_end_pfn > prev_end_pfn)
-		hole_pages += range_end_pfn -
-				max(range_start_pfn, prev_end_pfn);
-
-	return hole_pages;
+	return nr_absent;
 }
 
 /**
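The rewritten hole accounting is easy to sanity-check with invented numbers: for a range of [0, 1000) PFNs whose registered memory covers [100, 300) and [600, 900), nr_absent starts at 1000 and the loop subtracts the clamped intersections of 200 and 300 pages, leaving 500 absent pages, the same answer the old prev_end_pfn walk would have produced.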
@@ -4143,14 +3968,14 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *ignored)
 {
+	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
+	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
 	unsigned long node_start_pfn, node_end_pfn;
 	unsigned long zone_start_pfn, zone_end_pfn;
 
 	get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
-	zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
-							node_start_pfn);
-	zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
-							node_end_pfn);
+	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
+	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
 
 	adjust_zone_range_for_zone_movable(nid, zone_type,
 			node_start_pfn, node_end_pfn,
@@ -4158,7 +3983,7 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
 	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
-#else
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
 					unsigned long zone_type,
 					unsigned long *zones_size)
@@ -4176,7 +4001,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
 	return zholes_size[zone_type];
 }
 
-#endif
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 
 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
@@ -4399,10 +4224,10 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 	 */
 	if (pgdat == NODE_DATA(0)) {
 		mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
 			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
-#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 	}
 #endif
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
@@ -4427,7 +4252,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
 
-#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
 #if MAX_NUMNODES > 1
 /*
@@ -4448,170 +4273,6 @@ static inline void setup_nr_node_ids(void)
 }
 #endif
 
-/**
- * add_active_range - Register a range of PFNs backed by physical memory
- * @nid: The node ID the range resides on
- * @start_pfn: The start PFN of the available physical memory
- * @end_pfn: The end PFN of the available physical memory
- *
- * These ranges are stored in an early_node_map[] and later used by
- * free_area_init_nodes() to calculate zone sizes and holes. If the
- * range spans a memory hole, it is up to the architecture to ensure
- * the memory is not freed by the bootmem allocator. If possible
- * the range being registered will be merged with existing ranges.
- */
-void __init add_active_range(unsigned int nid, unsigned long start_pfn,
-						unsigned long end_pfn)
-{
-	int i;
-
-	mminit_dprintk(MMINIT_TRACE, "memory_register",
-			"Entering add_active_range(%d, %#lx, %#lx) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
-
-	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
-
-	/* Merge with existing active regions if possible */
-	for (i = 0; i < nr_nodemap_entries; i++) {
-		if (early_node_map[i].nid != nid)
-			continue;
-
-		/* Skip if an existing region covers this new one */
-		if (start_pfn >= early_node_map[i].start_pfn &&
-				end_pfn <= early_node_map[i].end_pfn)
-			return;
-
-		/* Merge forward if suitable */
-		if (start_pfn <= early_node_map[i].end_pfn &&
-				end_pfn > early_node_map[i].end_pfn) {
-			early_node_map[i].end_pfn = end_pfn;
-			return;
-		}
-
-		/* Merge backward if suitable */
-		if (start_pfn < early_node_map[i].start_pfn &&
-				end_pfn >= early_node_map[i].start_pfn) {
-			early_node_map[i].start_pfn = start_pfn;
-			return;
-		}
-	}
-
-	/* Check that early_node_map is large enough */
-	if (i >= MAX_ACTIVE_REGIONS) {
-		printk(KERN_CRIT "More than %d memory regions, truncating\n",
-							MAX_ACTIVE_REGIONS);
-		return;
-	}
-
-	early_node_map[i].nid = nid;
-	early_node_map[i].start_pfn = start_pfn;
-	early_node_map[i].end_pfn = end_pfn;
-	nr_nodemap_entries = i + 1;
-}
-
-/**
- * remove_active_range - Shrink an existing registered range of PFNs
- * @nid: The node id the range is on that should be shrunk
- * @start_pfn: The new PFN of the range
- * @end_pfn: The new PFN of the range
- *
- * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept near the end physical page range that has already been
- * registered. This function allows an arch to shrink an existing registered
- * range.
- */
-void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
-				unsigned long end_pfn)
-{
-	int i, j;
-	int removed = 0;
-
-	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
-			  nid, start_pfn, end_pfn);
-
-	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid) {
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn <= end_pfn) {
-			/* clear it */
-			early_node_map[i].start_pfn = 0;
-			early_node_map[i].end_pfn = 0;
-			removed = 1;
-			continue;
-		}
-		if (early_node_map[i].start_pfn < start_pfn &&
-		    early_node_map[i].end_pfn > start_pfn) {
-			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
-			early_node_map[i].end_pfn = start_pfn;
-			if (temp_end_pfn > end_pfn)
-				add_active_range(nid, end_pfn, temp_end_pfn);
-			continue;
-		}
-		if (early_node_map[i].start_pfn >= start_pfn &&
-		    early_node_map[i].end_pfn > end_pfn &&
-		    early_node_map[i].start_pfn < end_pfn) {
-			early_node_map[i].start_pfn = end_pfn;
-			continue;
-		}
-	}
-
-	if (!removed)
-		return;
-
-	/* remove the blank ones */
-	for (i = nr_nodemap_entries - 1; i > 0; i--) {
-		if (early_node_map[i].nid != nid)
-			continue;
-		if (early_node_map[i].end_pfn)
-			continue;
-		/* we found it, get rid of it */
-		for (j = i; j < nr_nodemap_entries - 1; j++)
-			memcpy(&early_node_map[j], &early_node_map[j+1],
-				sizeof(early_node_map[j]));
-		j = nr_nodemap_entries - 1;
-		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
-		nr_nodemap_entries--;
-	}
-}
-
-/**
- * remove_all_active_ranges - Remove all currently registered regions
- *
- * During discovery, it may be found that a table like SRAT is invalid
- * and an alternative discovery method must be used. This function removes
- * all currently registered regions.
- */
-void __init remove_all_active_ranges(void)
-{
-	memset(early_node_map, 0, sizeof(early_node_map));
-	nr_nodemap_entries = 0;
-}
-
-/* Compare two active node_active_regions */
-static int __init cmp_node_active_region(const void *a, const void *b)
-{
-	struct node_active_region *arange = (struct node_active_region *)a;
-	struct node_active_region *brange = (struct node_active_region *)b;
-
-	/* Done this way to avoid overflows */
-	if (arange->start_pfn > brange->start_pfn)
-		return 1;
-	if (arange->start_pfn < brange->start_pfn)
-		return -1;
-
-	return 0;
-}
-
-/* sort the node_map by start_pfn */
-void __init sort_node_map(void)
-{
-	sort(early_node_map, (size_t)nr_nodemap_entries,
-			sizeof(struct node_active_region),
-			cmp_node_active_region, NULL);
-}
-
 /**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
@@ -4634,15 +4295,11 @@ void __init sort_node_map(void)
 unsigned long __init node_map_pfn_alignment(void)
 {
 	unsigned long accl_mask = 0, last_end = 0;
+	unsigned long start, end, mask;
 	int last_nid = -1;
-	int i;
-
-	for_each_active_range_index_in_nid(i, MAX_NUMNODES) {
-		int nid = early_node_map[i].nid;
-		unsigned long start = early_node_map[i].start_pfn;
-		unsigned long end = early_node_map[i].end_pfn;
-		unsigned long mask;
+	int i, nid;
 
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
 		if (!start || last_nid < 0 || last_nid == nid) {
 			last_nid = nid;
 			last_end = end;
|
|||||||
/* Find the lowest pfn for a node */
|
/* Find the lowest pfn for a node */
|
||||||
static unsigned long __init find_min_pfn_for_node(int nid)
|
static unsigned long __init find_min_pfn_for_node(int nid)
|
||||||
{
|
{
|
||||||
int i;
|
|
||||||
unsigned long min_pfn = ULONG_MAX;
|
unsigned long min_pfn = ULONG_MAX;
|
||||||
|
unsigned long start_pfn;
|
||||||
|
int i;
|
||||||
|
|
||||||
/* Assuming a sorted map, the first range found has the starting pfn */
|
for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
|
||||||
for_each_active_range_index_in_nid(i, nid)
|
min_pfn = min(min_pfn, start_pfn);
|
||||||
min_pfn = min(min_pfn, early_node_map[i].start_pfn);
|
|
||||||
|
|
||||||
if (min_pfn == ULONG_MAX) {
|
if (min_pfn == ULONG_MAX) {
|
||||||
printk(KERN_WARNING
|
printk(KERN_WARNING
|
||||||
@ -4703,15 +4360,16 @@ unsigned long __init find_min_pfn_with_active_regions(void)
|
|||||||
*/
|
*/
|
||||||
static unsigned long __init early_calculate_totalpages(void)
|
static unsigned long __init early_calculate_totalpages(void)
|
||||||
{
|
{
|
||||||
int i;
|
|
||||||
unsigned long totalpages = 0;
|
unsigned long totalpages = 0;
|
||||||
|
unsigned long start_pfn, end_pfn;
|
||||||
|
int i, nid;
|
||||||
|
|
||||||
|
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
|
||||||
|
unsigned long pages = end_pfn - start_pfn;
|
||||||
|
|
||||||
for (i = 0; i < nr_nodemap_entries; i++) {
|
|
||||||
unsigned long pages = early_node_map[i].end_pfn -
|
|
||||||
early_node_map[i].start_pfn;
|
|
||||||
totalpages += pages;
|
totalpages += pages;
|
||||||
if (pages)
|
if (pages)
|
||||||
node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
|
node_set_state(nid, N_HIGH_MEMORY);
|
||||||
}
|
}
|
||||||
return totalpages;
|
return totalpages;
|
||||||
}
|
}
|
||||||
@ -4766,6 +4424,8 @@ restart:
|
|||||||
/* Spread kernelcore memory as evenly as possible throughout nodes */
|
/* Spread kernelcore memory as evenly as possible throughout nodes */
|
||||||
kernelcore_node = required_kernelcore / usable_nodes;
|
kernelcore_node = required_kernelcore / usable_nodes;
|
||||||
for_each_node_state(nid, N_HIGH_MEMORY) {
|
for_each_node_state(nid, N_HIGH_MEMORY) {
|
||||||
|
unsigned long start_pfn, end_pfn;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Recalculate kernelcore_node if the division per node
|
* Recalculate kernelcore_node if the division per node
|
||||||
* now exceeds what is necessary to satisfy the requested
|
* now exceeds what is necessary to satisfy the requested
|
||||||
@ -4782,13 +4442,10 @@ restart:
|
|||||||
kernelcore_remaining = kernelcore_node;
|
kernelcore_remaining = kernelcore_node;
|
||||||
|
|
||||||
/* Go through each range of PFNs within this node */
|
/* Go through each range of PFNs within this node */
|
||||||
for_each_active_range_index_in_nid(i, nid) {
|
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
|
||||||
unsigned long start_pfn, end_pfn;
|
|
||||||
unsigned long size_pages;
|
unsigned long size_pages;
|
||||||
|
|
||||||
start_pfn = max(early_node_map[i].start_pfn,
|
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
|
||||||
zone_movable_pfn[nid]);
|
|
||||||
end_pfn = early_node_map[i].end_pfn;
|
|
||||||
if (start_pfn >= end_pfn)
|
if (start_pfn >= end_pfn)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
@ -4890,11 +4547,8 @@ static void check_for_regular_memory(pg_data_t *pgdat)
|
|||||||
*/
|
*/
|
||||||
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
|
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
|
||||||
{
|
{
|
||||||
unsigned long nid;
|
unsigned long start_pfn, end_pfn;
|
||||||
int i;
|
int i, nid;
|
||||||
|
|
||||||
/* Sort early_node_map as initialisation assumes it is sorted */
|
|
||||||
sort_node_map();
|
|
||||||
|
|
||||||
/* Record where the zone boundaries are */
|
/* Record where the zone boundaries are */
|
||||||
memset(arch_zone_lowest_possible_pfn, 0,
|
memset(arch_zone_lowest_possible_pfn, 0,
|
||||||
@ -4941,11 +4595,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/* Print out the early_node_map[] */
|
/* Print out the early_node_map[] */
|
||||||
printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
|
printk("Early memory PFN ranges\n");
|
||||||
for (i = 0; i < nr_nodemap_entries; i++)
|
for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
|
||||||
printk(" %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
|
printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
|
||||||
early_node_map[i].start_pfn,
|
|
||||||
early_node_map[i].end_pfn);
|
|
||||||
|
|
||||||
/* Initialise every node */
|
/* Initialise every node */
|
||||||
mminit_verify_pageflags_layout();
|
mminit_verify_pageflags_layout();
|
||||||
@ -4998,7 +4650,7 @@ static int __init cmdline_parse_movablecore(char *p)
|
|||||||
early_param("kernelcore", cmdline_parse_kernelcore);
|
early_param("kernelcore", cmdline_parse_kernelcore);
|
||||||
early_param("movablecore", cmdline_parse_movablecore);
|
early_param("movablecore", cmdline_parse_movablecore);
|
||||||
|
|
||||||
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
|
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* set_dma_reserve - set the specified number of pages reserved in the first zone
|
* set_dma_reserve - set the specified number of pages reserved in the first zone
|
||||||