mm, memory_hotplug: provide more generic restrictions for memory hotplug
arch_add_memory() and __add_pages() take a want_memblock argument which controls whether the newly added memory should get the sysfs memblock user API (e.g. ZONE_DEVICE users do not want/need this interface). Some callers also want to control where the memmap is allocated from, by configuring an altmap.

Add a more generic hotplug context for arch_add_memory() and __add_pages(). struct mhp_restrictions carries flags for additional features to be enabled by the memory hotplug (currently only MHP_MEMBLOCK_API) and an altmap for the alternative memmap allocator.

This patch should not introduce any functional change.

[akpm@linux-foundation.org: build fix]
Link: http://lkml.kernel.org/r/20190408082633.2864-3-osalvador@suse.de
Signed-off-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 940519f0c8
parent 5557c766ab
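For orientation, the shape of the interface change; the struct, flag, and function names below are taken from the diff itself, only the condensed before/after layout is an editorial sketch:

	/* Before: two loose knobs threaded through every architecture. */
	int arch_add_memory(int nid, u64 start, u64 size,
			    struct vmem_altmap *altmap, bool want_memblock);

	/* After: one extensible hotplug context. */
	struct mhp_restrictions {
		unsigned long flags;		/* MHP_ flags, currently only MHP_MEMBLOCK_API */
		struct vmem_altmap *altmap;	/* alternative allocator for the memmap array */
	};

	int arch_add_memory(int nid, u64 start, u64 size,
			    struct mhp_restrictions *restrictions);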
@@ -1065,8 +1065,8 @@ int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		    bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_restrictions *restrictions)
 {
 	int flags = 0;
 
@@ -1077,6 +1077,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 		   size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);
 
 	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
-			   altmap, want_memblock);
+			   restrictions);
 }
 #endif
@@ -666,14 +666,14 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		    bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
 		       __func__, ret);
@@ -109,8 +109,8 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
 	return -ENODEV;
 }
 
-int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-			  bool want_memblock)
+int __ref arch_add_memory(int nid, u64 start, u64 size,
+			  struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -127,7 +127,7 @@ int __ref arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altm
 	}
 	flush_inval_dcache_range(start, start + size);
 
-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -219,8 +219,8 @@ device_initcall(s390_cma_mem_init);
 
 #endif /* CONFIG_CMA */
 
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long size_pages = PFN_DOWN(size);
@@ -230,7 +230,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
 	if (rc)
 		return rc;
 
-	rc = __add_pages(nid, start_pfn, size_pages, altmap, want_memblock);
+	rc = __add_pages(nid, start_pfn, size_pages, restrictions);
 	if (rc)
 		vmem_remove_mapping(start, size);
 	return rc;
@@ -404,15 +404,15 @@ void __init mem_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
 	/* We only have ZONE_NORMAL, so this is easy.. */
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
 
@@ -850,13 +850,13 @@ void __init mem_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
@@ -777,11 +777,11 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 }
 
 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
-	      struct vmem_altmap *altmap, bool want_memblock)
+	      struct mhp_restrictions *restrictions)
 {
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
 	WARN_ON_ONCE(ret);
 
 	/* update max_pfn, max_low_pfn and high_memory */
@@ -791,15 +791,15 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
 	return ret;
 }
 
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 
 	init_memory_mapping(start, start + size);
 
-	return add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 
 #define PAGE_INUSE 0xFD
@@ -53,6 +53,16 @@ enum {
 	MMOP_ONLINE_MOVABLE,
 };
 
+/*
+ * Restrictions for the memory hotplug:
+ * flags:  MHP_ flags
+ * altmap: alternative allocator for memmap array
+ */
+struct mhp_restrictions {
+	unsigned long flags;
+	struct vmem_altmap *altmap;
+};
+
 /*
  * Zone resizing functions
  *
@@ -101,6 +111,8 @@ extern void __online_page_free(struct page *page);
 
 extern int try_online_node(int nid);
 
+extern int arch_add_memory(int nid, u64 start, u64 size,
+			   struct mhp_restrictions *restrictions);
 extern u64 max_mem_size;
 
 extern bool memhp_auto_online;
@@ -118,20 +130,27 @@ extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
 			  unsigned long nr_pages, struct vmem_altmap *altmap);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
+/*
+ * Do we want sysfs memblock files created. This will allow userspace to online
+ * and offline memory explicitly. Lack of this bit means that the caller has to
+ * call move_pfn_range_to_zone to finish the initialization.
+ */
+
+#define MHP_MEMBLOCK_API		(1<<0)
+
 /* reasonably generic interface to expand the physical pages */
 extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
-		       struct vmem_altmap *altmap, bool want_memblock);
+		       struct mhp_restrictions *restrictions);
 
 #ifndef CONFIG_ARCH_HAS_ADD_PAGES
 static inline int add_pages(int nid, unsigned long start_pfn,
-			    unsigned long nr_pages, struct vmem_altmap *altmap,
-			    bool want_memblock)
+			    unsigned long nr_pages, struct mhp_restrictions *restrictions)
 {
-	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	return __add_pages(nid, start_pfn, nr_pages, restrictions);
 }
 #else /* ARCH_HAS_ADD_PAGES */
 int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
-	      struct vmem_altmap *altmap, bool want_memblock);
+	      struct mhp_restrictions *restrictions);
 #endif /* ARCH_HAS_ADD_PAGES */
 
 #ifdef CONFIG_NUMA
@@ -332,8 +351,6 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
 extern int __add_memory(int nid, u64 start, u64 size);
 extern int add_memory(int nid, u64 start, u64 size);
 extern int add_memory_resource(int nid, struct resource *resource);
-extern int arch_add_memory(int nid, u64 start, u64 size,
-			   struct vmem_altmap *altmap, bool want_memblock);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 				   unsigned long nr_pages, struct vmem_altmap *altmap);
 extern bool is_memblock_offlined(struct memory_block *mem);
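The two kinds of callers converted later in this patch fill the new context in opposite ways; condensed from the devm_memremap_pages() and add_memory_resource() hunks that follow:

	/* Regular hotplug (add_memory_resource): sysfs memblock API, no altmap. */
	struct mhp_restrictions restrictions = {
		.flags	= MHP_MEMBLOCK_API,
	};

	/* ZONE_DEVICE (devm_memremap_pages): no sysfs files, memmap from the altmap. */
	struct mhp_restrictions restrictions = {
		.altmap	= altmap,
	};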
@@ -148,6 +148,12 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 			&pgmap->altmap : NULL;
 	struct resource *res = &pgmap->res;
 	struct dev_pagemap *conflict_pgmap;
+	struct mhp_restrictions restrictions = {
+		/*
+		 * We do not want any optional features only our own memmap
+		 */
+		.altmap = altmap,
+	};
 	pgprot_t pgprot = PAGE_KERNEL;
 	int error, nid, is_ram;
 
@@ -214,7 +220,7 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	 */
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
 		error = add_pages(nid, align_start >> PAGE_SHIFT,
-				align_size >> PAGE_SHIFT, NULL, false);
+				align_size >> PAGE_SHIFT, &restrictions);
 	} else {
 		error = kasan_add_zero_shadow(__va(align_start), align_size);
 		if (error) {
@@ -222,8 +228,8 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 			goto err_kasan;
 		}
 
-		error = arch_add_memory(nid, align_start, align_size, altmap,
-				false);
+		error = arch_add_memory(nid, align_start, align_size,
+				&restrictions);
 	}
 
 	if (!error) {
@@ -273,12 +273,12 @@ static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
  * add the new pages.
  */
 int __ref __add_pages(int nid, unsigned long phys_start_pfn,
-		unsigned long nr_pages, struct vmem_altmap *altmap,
-		bool want_memblock)
+		unsigned long nr_pages, struct mhp_restrictions *restrictions)
 {
 	unsigned long i;
 	int err = 0;
 	int start_sec, end_sec;
+	struct vmem_altmap *altmap = restrictions->altmap;
 
 	/* during initialize mem_map, align hot-added range to section */
 	start_sec = pfn_to_section_nr(phys_start_pfn);
@@ -299,7 +299,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 
 	for (i = start_sec; i <= end_sec; i++) {
 		err = __add_section(nid, section_nr_to_pfn(i), altmap,
-				want_memblock);
+				restrictions->flags & MHP_MEMBLOCK_API);
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -1097,6 +1097,9 @@ static int online_memory_block(struct memory_block *mem, void *arg)
  */
 int __ref add_memory_resource(int nid, struct resource *res)
 {
+	struct mhp_restrictions restrictions = {
+		.flags = MHP_MEMBLOCK_API,
+	};
 	u64 start, size;
 	bool new_node = false;
 	int ret;
@@ -1124,7 +1127,7 @@ int __ref add_memory_resource(int nid, struct resource *res)
 	new_node = ret;
 
 	/* call arch's memory hotadd */
-	ret = arch_add_memory(nid, start, size, NULL, true);
+	ret = arch_add_memory(nid, start, size, &restrictions);
 	if (ret < 0)
 		goto error;
 
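The benefit of passing a struct rather than bare arguments is that a future capability only needs the generic code to learn a new bit, with no churn in the per-arch arch_add_memory() implementations. A hypothetical sketch, not part of this patch (the flag and helper below are invented for illustration):

	/* Hypothetical follow-up capability bit -- not defined by this patch. */
	#define MHP_EXAMPLE		(1<<1)

	/*
	 * Invented helper: only generic code would test the new bit; every
	 * architecture keeps passing the same struct mhp_restrictions through.
	 */
	static bool mhp_wants_example(const struct mhp_restrictions *restrictions)
	{
		return restrictions->flags & MHP_EXAMPLE;
	}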