mm: remove unnecessary ia64 code and comment
IA64 is gone since commit cf8e8658100d ("arch: Remove Itanium (IA-64) architecture"); remove the now-unnecessary ia64-specific mm code and comments too.

Link: https://lkml.kernel.org/r/20231222070203.2966980-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e99fb98d47
parent 4a8ffab02d
@@ -770,7 +770,7 @@ config DEFAULT_MMAP_MIN_ADDR
 	  from userspace allocation. Keeping a user from writing to low pages
 	  can help reduce the impact of kernel NULL pointer bugs.
 
-	  For most ia64, ppc64 and x86 users with lots of address space
+	  For most ppc64 and x86 users with lots of address space
 	  a value of 65536 is reasonable and should cause no problems.
 	  On arm and other archs it should not be higher than 32768.
 	  Programs which use vm86 functionality or have some need to map
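As background for the help text above, a minimal userspace sketch (not part of this commit) of how the vm.mmap_min_addr limit behaves in practice: read the sysctl, then attempt a MAP_FIXED mapping below it, which is expected to fail with EPERM for an unprivileged process at the default value of 65536.

/* Hypothetical demo, not kernel code: probe the vm.mmap_min_addr limit. */
#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long min_addr = 0;
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");

	if (f) {
		if (fscanf(f, "%lu", &min_addr) != 1)
			min_addr = 0;
		fclose(f);
	}
	printf("vm.mmap_min_addr = %lu\n", min_addr);

	/* Try to map one page at a low fixed address (0x1000). */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap at 0x1000 failed: %s (expected while 0x1000 < mmap_min_addr)\n",
		       strerror(errno));
	else
		printf("mmap at 0x1000 succeeded\n");
	return 0;
}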
@@ -123,9 +123,7 @@ static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
 /*
  * A number of key systems in x86 including ioremap() rely on the assumption
  * that high_memory defines the upper bound on direct map memory, then end
- * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and
- * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
- * and ZONE_HIGHMEM.
+ * of ZONE_NORMAL.
  */
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
mm/mm_init.c
@@ -1467,8 +1467,7 @@ void __init set_pageblock_order(void)
 
 	/*
 	 * Assume the largest contiguous order of interest is a huge page.
-	 * This value may be variable depending on boot parameters on IA64 and
-	 * powerpc.
+	 * This value may be variable depending on boot parameters on powerpc.
 	 */
 	pageblock_order = order;
 }
@@ -1629,8 +1628,8 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align,
 #ifdef CONFIG_FLATMEM
 static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 {
-	unsigned long __maybe_unused start = 0;
-	unsigned long __maybe_unused offset = 0;
+	unsigned long start, offset, size, end;
+	struct page *map;
 
 	/* Skip empty nodes */
 	if (!pgdat->node_spanned_pages)
@@ -1638,33 +1637,24 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
 
 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
 	offset = pgdat->node_start_pfn - start;
-	/* ia64 gets its own node_mem_map, before this, without bootmem */
-	if (!pgdat->node_mem_map) {
-		unsigned long size, end;
-		struct page *map;
-
-		/*
-		 * The zone's endpoints aren't required to be MAX_ORDER
-		 * aligned but the node_mem_map endpoints must be in order
-		 * for the buddy allocator to function correctly.
-		 */
-		end = pgdat_end_pfn(pgdat);
-		end = ALIGN(end, MAX_ORDER_NR_PAGES);
-		size = (end - start) * sizeof(struct page);
-		map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
-				   pgdat->node_id, false);
-		if (!map)
-			panic("Failed to allocate %ld bytes for node %d memory map\n",
-			      size, pgdat->node_id);
-		pgdat->node_mem_map = map + offset;
-	}
-	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
-				__func__, pgdat->node_id, (unsigned long)pgdat,
-				(unsigned long)pgdat->node_mem_map);
-#ifndef CONFIG_NUMA
 	/*
-	 * With no DISCONTIG, the global mem_map is just set as node 0's
+	 * The zone's endpoints aren't required to be MAX_ORDER
+	 * aligned but the node_mem_map endpoints must be in order
+	 * for the buddy allocator to function correctly.
 	 */
+	end = ALIGN(pgdat_end_pfn(pgdat), MAX_ORDER_NR_PAGES);
+	size = (end - start) * sizeof(struct page);
+	map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
+			   pgdat->node_id, false);
+	if (!map)
+		panic("Failed to allocate %ld bytes for node %d memory map\n",
+		      size, pgdat->node_id);
+	pgdat->node_mem_map = map + offset;
+	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		 __func__, pgdat->node_id, (unsigned long)pgdat,
+		 (unsigned long)pgdat->node_mem_map);
+#ifndef CONFIG_NUMA
+	/* the global mem_map is just set as node 0's */
 	if (pgdat == NODE_DATA(0)) {
 		mem_map = NODE_DATA(0)->node_mem_map;
 		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
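The hunk above keeps the comment's alignment rule: node_mem_map must span MAX_ORDER-aligned pfn boundaries even when the node's own span is not aligned. A standalone worked example of that arithmetic (not part of the commit; the pfn values, MAX_ORDER_NR_PAGES of 1024 and a 64-byte struct page are made up for illustration):

/* Illustrative only: the rounding applied to node_start_pfn/end_pfn. */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL			/* assumed value for the demo */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long node_start_pfn = 1500;		/* hypothetical unaligned span */
	unsigned long node_end_pfn   = 9000;
	unsigned long start, offset, end, size;

	start  = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);	/* round down: 1024 */
	offset = node_start_pfn - start;			/* 476 pages of slack */
	end    = ALIGN_UP(node_end_pfn, MAX_ORDER_NR_PAGES);	/* round up: 9216 */
	size   = (end - start) * 64;				/* pretend sizeof(struct page) == 64 */

	printf("map covers pfns [%lu, %lu), node uses map + %lu, %lu bytes\n",
	       start, end, offset, size);
	return 0;
}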
@@ -121,7 +121,6 @@ static noinline depot_stack_handle_t save_stack(gfp_t flags)
 	 * Sometimes page metadata allocation tracking requires more
 	 * memory to be allocated:
 	 * - when new stack trace is saved to stack depot
-	 * - when backtrace itself is calculated (ia64)
 	 */
 	if (current->in_page_owner)
 		return dummy_handle;
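The surrounding check (current->in_page_owner) is a re-entrancy guard: saving a stack trace can itself allocate memory, which would recurse back into page_owner. A minimal userspace sketch of the same idiom, with made-up names (record_event/do_allocation), not the kernel's implementation:

/* Illustrative re-entrancy guard, analogous to current->in_page_owner. */
#include <stdio.h>
#include <stdbool.h>

static __thread bool in_tracker;	/* per-thread flag, like a task_struct field */

static void do_allocation(void);

static void record_event(const char *what)
{
	if (in_tracker)
		return;			/* already tracking: bail out instead of recursing */
	in_tracker = true;
	printf("tracking: %s\n", what);
	do_allocation();		/* may call record_event() again; the guard stops it */
	in_tracker = false;
}

static void do_allocation(void)
{
	record_event("nested allocation");	/* silently skipped by the guard */
}

int main(void)
{
	record_event("outer allocation");
	return 0;
}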