mm, memory_hotplug: add nid parameter to arch_remove_memory
Patch series "Do not touch pages in hot-remove path", v2. This patchset aims for two things: 1) A better definition about offline and hot-remove stage 2) Solving bugs where we can access non-initialized pages during hot-remove operations [2] [3]. This is achieved by moving all page/zone handling to the offline stage, so we do not need to access pages when hot-removing memory. [1] https://patchwork.kernel.org/cover/10691415/ [2] https://patchwork.kernel.org/patch/10547445/ [3] https://www.spinics.net/lists/linux-mm/msg161316.html This patch (of 5): This is a preparation for the following-up patches. The idea of passing the nid is that it will allow us to get rid of the zone parameter afterwards. Link: http://lkml.kernel.org/r/20181127162005.15833-2-osalvador@suse.de Signed-off-by: Oscar Salvador <osalvador@suse.de> Reviewed-by: David Hildenbrand <david@redhat.com> Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Jerome Glisse <jglisse@redhat.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: "Rafael J. Wysocki" <rafael@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
@@ -661,7 +661,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -139,7 +139,8 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int __meminit arch_remove_memory(int nid, u64 start, u64 size,
+				 struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -242,7 +242,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	/*
 	 * There is no hardware or firmware interface which could trigger a
@@ -443,7 +443,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 #endif
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -860,7 +860,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int arch_remove_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -1141,7 +1141,8 @@ kernel_physical_mapping_remove(unsigned long start, unsigned long end)
 	remove_pagetable(start, end, true, NULL);
 }
 
-int __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+int __ref arch_remove_memory(int nid, u64 start, u64 size,
+			     struct vmem_altmap *altmap)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -107,8 +107,8 @@ static inline bool movable_node_is_enabled(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-extern int arch_remove_memory(u64 start, u64 size,
-		struct vmem_altmap *altmap);
+extern int arch_remove_memory(int nid, u64 start, u64 size,
+			      struct vmem_altmap *altmap);
 extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
 	unsigned long nr_pages, struct vmem_altmap *altmap);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
@@ -87,6 +87,7 @@ static void devm_memremap_pages_release(void *data)
 	struct resource *res = &pgmap->res;
 	resource_size_t align_start, align_size;
 	unsigned long pfn;
+	int nid;
 
 	pgmap->kill(pgmap->ref);
 	for_each_device_pfn(pfn, pgmap)
@@ -97,13 +98,15 @@ static void devm_memremap_pages_release(void *data)
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
 		- align_start;
 
+	nid = page_to_nid(pfn_to_page(align_start >> PAGE_SHIFT));
+
 	mem_hotplug_begin();
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
 		pfn = align_start >> PAGE_SHIFT;
 		__remove_pages(page_zone(pfn_to_page(pfn)), pfn,
 				align_size >> PAGE_SHIFT, NULL);
 	} else {
-		arch_remove_memory(align_start, align_size,
+		arch_remove_memory(nid, align_start, align_size,
 				pgmap->altmap_valid ? &pgmap->altmap : NULL);
 		kasan_remove_zero_shadow(__va(align_start), align_size);
 	}
@@ -1841,7 +1841,7 @@ void __ref __remove_memory(int nid, u64 start, u64 size)
 	memblock_free(start, size);
 	memblock_remove(start, size);
 
-	arch_remove_memory(start, size, NULL);
+	arch_remove_memory(nid, start, size, NULL);
 
 	try_offline_node(nid);
 