/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function.  Initially it is generic_online_page().  If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */
static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
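
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a driver that needs to intercept page onlining -- e.g. the Xen balloon
 * driver -- registers its own callback roughly like this;
 * "foo_online_page" is a hypothetical name:
 *
 *	static void foo_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		__online_page_increment_counters(page);
 *		__online_page_free(page);
 *	}
 *
 *	rc = set_online_page_callback(&foo_online_page);
 *	...
 *	rc = restore_online_page_callback(&foo_online_page);
 */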

DEFINE_MUTEX(mem_hotplug_mutex);

void lock_memory_hotplug(void)
{
	mutex_lock(&mem_hotplug_mutex);
}

void unlock_memory_hotplug(void)
{
	mutex_unlock(&mem_hotplug_mutex);
}
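
/*
 * Usage sketch (added commentary): paths that online/offline memory or
 * resize zones take this lock around the whole operation, e.g.
 *
 *	lock_memory_hotplug();
 *	... add or remove memory, update zones/zonelists ...
 *	unlock_memory_hotplug();
 */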

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		res = NULL;
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}
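
/*
 * Pairing sketch (added commentary): in the hot-add path these two are
 * expected to pair up roughly as follows ("something_failed" is a
 * placeholder for the caller's error handling):
 *
 *	res = register_memory_resource(start, size);
 *	if (!res)
 *		return -EEXIST;
 *	...
 *	if (something_failed)
 *		release_memory_resource(res);
 */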

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->lru.next = (struct list_head *) type;
	SetPagePrivate(page);
	set_page_private(page, info);
	atomic_inc(&page->_count);
}
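
/*
 * Added commentary: get_page_bootmem() stashes the bootmem type
 * (SECTION_INFO, MIX_SECTION_INFO or NODE_INFO) in page->lru.next and the
 * section number or node id in page_private(), then takes an extra
 * reference.  put_page_bootmem() below undoes this and, once the last
 * reference is dropped, returns the page to the buddy allocator via
 * free_reserved_page().
 */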

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->lru.next;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (atomic_dec_return(&page->_count) == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	if (!pfn_valid(start_pfn))
		return;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = __nr_to_section(section_nr)->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
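
/*
 * Added commentary: the two variants above differ only in how the memmap's
 * backing pages are reached.  Without vmemmap the memmap is an ordinary
 * linearly-mapped allocation, so virt_to_page() on it is valid and each
 * page can be tagged directly; with vmemmap the backing pages must be
 * found by walking the vmemmap page tables, which the arch-provided
 * register_page_bootmem_memmap() does.
 */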

void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;
	struct zone *zone;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	zone = &pgdat->node_zones[0];
	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
		if (zone_is_initialized(zone)) {
			nr_pages = zone->wait_table_hash_nr_entries
				* sizeof(wait_queue_head_t);
			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
			page = virt_to_page(zone->wait_table);

			for (i = 0; i < nr_pages; i++, page++)
				get_page_bootmem(node, page, NODE_INFO);
		}
	}

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes - on
		 * node0 as well as nodeN.  To avoid registering a pfn against
		 * multiple nodes we check that this pfn does not already
		 * reside in some other nodes.
		 */
		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
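
/*
 * Added commentary: register_page_bootmem_info_node() is meant to run once
 * per node early after boot, so that every bootmem-allocated page backing
 * the pgdat, the zone wait tables, the memmaps and the usemaps carries
 * enough information for a later hot-remove to decide whether the page may
 * be freed.
 */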

static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
			   unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone_end_pfn(zone);
	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}
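
/*
 * Worked example (added commentary): for a zone spanning pfns
 * [0x1000, 0x2000) grown with start_pfn = 0x2000 and end_pfn = 0x3000,
 * zone_start_pfn is unchanged and spanned_pages becomes
 * max(0x2000, 0x3000) - 0x1000 = 0x2000, i.e. the zone now spans
 * [0x1000, 0x3000).
 */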

static void resize_zone(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	zone_span_writelock(zone);

	if (end_pfn - start_pfn) {
		zone->zone_start_pfn = start_pfn;
		zone->spanned_pages = end_pfn - start_pfn;
	} else {
		/*
		 * Keep this consistent with free_area_init_core():
		 * if spanned_pages == 0, then keep zone_start_pfn == 0.
		 */
		zone->zone_start_pfn = 0;
		zone->spanned_pages = 0;
	}

	zone_span_writeunlock(zone);
}

static void fix_zone_id(struct zone *zone, unsigned long start_pfn,
			unsigned long end_pfn)
{
	enum zone_type zid = zone_idx(zone);
	int nid = zone->zone_pgdat->node_id;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		set_page_links(pfn_to_page(pfn), zid, nid, pfn);
}
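
/*
 * Added commentary: fix_zone_id() rewrites the zone and node ids encoded in
 * page->flags for every page in [start_pfn, end_pfn), so that pages moved
 * between zones (e.g. ZONE_NORMAL <-> ZONE_MOVABLE) report the correct
 * zone through page_zone() afterwards.
 */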

/* Can fail with -ENOMEM from allocating a wait table with vmalloc() or
 * alloc_bootmem_node_nopanic() */
static int __ref ensure_zone_is_initialized(struct zone *zone,
			unsigned long start_pfn, unsigned long num_pages)
{
	if (!zone_is_initialized(zone))
		return init_currently_empty_zone(zone, start_pfn, num_pages,
						 MEMMAP_HOTPLUG);
	return 0;
}
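
/*
 * Added commentary: the __ref annotation above suppresses the
 * section-mismatch warning for calling init_currently_empty_zone(), which
 * is __meminit, from code that must also be reachable after boot.
 */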

static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
		unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;
	unsigned long flags;
	unsigned long z1_start_pfn;

	ret = ensure_zone_is_initialized(z1, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;

	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are higher than @z2 */
2013-02-23 04:35:23 +04:00
	if (end_pfn > zone_end_pfn(z2))
2012-12-12 04:03:16 +04:00
		goto out_fail;
2013-07-04 02:03:04 +04:00
	/* the moved-out part must be at the leftmost of @z2 */
2012-12-12 04:03:16 +04:00
	if (start_pfn > z2->zone_start_pfn)
		goto out_fail;
	/* must include/overlap */
	if (end_pfn <= z2->zone_start_pfn)
		goto out_fail;
2012-12-12 04:03:20 +04:00
	/* use start_pfn for z1's start_pfn if z1 is empty */
2013-09-12 01:21:45 +04:00
	if (!zone_is_empty(z1))
2012-12-12 04:03:20 +04:00
		z1_start_pfn = z1->zone_start_pfn;
	else
		z1_start_pfn = start_pfn;

	resize_zone(z1, z1_start_pfn, end_pfn);
2013-02-23 04:35:23 +04:00
	resize_zone(z2, end_pfn, zone_end_pfn(z2));
2012-12-12 04:03:16 +04:00
	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z1, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}
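For reference, a minimal userspace sketch (not kernel code) of the boundary
arithmetic that move_pfn_range_left() above and move_pfn_range_right() below
delegate to resize_zone(): the boundary pfn simply migrates between two
adjacent spans. The zspan struct is an illustrative stand-in for struct
zone's span fields, and the pfn values are made up.

#include <assert.h>

/* Illustrative stand-in for the spanned-range fields of struct zone. */
struct zspan { unsigned long start_pfn, spanned_pages; };

static unsigned long zspan_end(const struct zspan *z)
{
	return z->start_pfn + z->spanned_pages;
}

static void zspan_resize(struct zspan *z, unsigned long start, unsigned long end)
{
	z->start_pfn = start;
	z->spanned_pages = end - start;
}

int main(void)
{
	/* z1 = [0x10000, 0x20000), z2 = [0x20000, 0x30000) */
	struct zspan z1 = { 0x10000, 0x10000 };
	struct zspan z2 = { 0x20000, 0x10000 };
	unsigned long start_pfn = 0x20000, end_pfn = 0x28000;
	unsigned long z2_old_end = zspan_end(&z2);

	/* "move left": [start_pfn, end_pfn) leaves z2 and joins z1,
	 * so z1 grows up to end_pfn and z2 now begins at end_pfn */
	zspan_resize(&z1, z1.start_pfn, end_pfn);
	zspan_resize(&z2, end_pfn, z2_old_end);

	assert(zspan_end(&z1) == z2.start_pfn); /* spans stay adjacent */
	assert(z1.spanned_pages == 0x18000 && z2.spanned_pages == 0x8000);
	return 0;
}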
2012-12-12 04:03:20 +04:00
static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
2012-12-12 04:03:16 +04:00
					  unsigned long start_pfn, unsigned long end_pfn)
{
2012-12-12 04:03:20 +04:00
	int ret;
2012-12-12 04:03:16 +04:00
	unsigned long flags;
2012-12-12 04:03:20 +04:00
	unsigned long z2_end_pfn;
2013-02-23 04:35:31 +04:00
	ret = ensure_zone_is_initialized(z2, start_pfn, end_pfn - start_pfn);
	if (ret)
		return ret;
2012-12-12 04:03:16 +04:00
	pgdat_resize_lock(z1->zone_pgdat, &flags);

	/* can't move pfns which are lower than @z1 */
	if (z1->zone_start_pfn > start_pfn)
		goto out_fail;
	/* the moved-out part must be at the rightmost of @z1 */
2013-02-23 04:35:23 +04:00
	if (zone_end_pfn(z1) > end_pfn)
2012-12-12 04:03:16 +04:00
		goto out_fail;
	/* must include/overlap */
2013-02-23 04:35:23 +04:00
	if (start_pfn >= zone_end_pfn(z1))
2012-12-12 04:03:16 +04:00
		goto out_fail;
2012-12-12 04:03:20 +04:00
	/* use end_pfn for z2's end_pfn if z2 is empty */
2013-09-12 01:21:45 +04:00
	if (!zone_is_empty(z2))
2013-02-23 04:35:23 +04:00
		z2_end_pfn = zone_end_pfn(z2);
2012-12-12 04:03:20 +04:00
	else
		z2_end_pfn = end_pfn;
2012-12-12 04:03:16 +04:00
	resize_zone(z1, z1->zone_start_pfn, start_pfn);
2012-12-12 04:03:20 +04:00
	resize_zone(z2, start_pfn, z2_end_pfn);
2012-12-12 04:03:16 +04:00
	pgdat_resize_unlock(z1->zone_pgdat, &flags);

	fix_zone_id(z2, start_pfn, end_pfn);

	return 0;
out_fail:
	pgdat_resize_unlock(z1->zone_pgdat, &flags);
	return -1;
}
2008-05-15 03:05:52 +04:00
static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
			    unsigned long end_pfn)
{
2013-11-13 03:07:19 +04:00
	unsigned long old_pgdat_end_pfn = pgdat_end_pfn(pgdat);
2008-05-15 03:05:52 +04:00
2012-12-12 04:01:07 +04:00
	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
2008-05-15 03:05:52 +04:00
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
				    pgdat->node_start_pfn;
}
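A quick userspace check of grow_pgdat_span()'s arithmetic, with made-up pfn
values (not kernel code): the node span is widened so that it covers both its
old extent and the newly added range, holes included.

#include <assert.h>

int main(void)
{
	/* node currently spans [0x10000, 0x18000) */
	unsigned long node_start_pfn = 0x10000, node_spanned_pages = 0x8000;
	unsigned long old_end = node_start_pfn + node_spanned_pages;

	/* hot-add a range below the node: [0x8000, 0x9000) */
	unsigned long start_pfn = 0x8000, end_pfn = 0x9000;

	if (!node_spanned_pages || start_pfn < node_start_pfn)
		node_start_pfn = start_pfn;
	node_spanned_pages = (old_end > end_pfn ? old_end : end_pfn) -
			     node_start_pfn;

	/* the node now spans [0x8000, 0x18000) */
	assert(node_start_pfn == 0x8000 && node_spanned_pages == 0x10000);
	return 0;
}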
2008-11-22 20:33:24 +03:00
static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
2005-10-30 04:16:54 +03:00
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;
2008-05-15 03:05:52 +04:00
	unsigned long flags;
2013-02-23 04:35:31 +04:00
	int ret;
2005-10-30 04:16:54 +03:00
	zone_type = zone - pgdat->node_zones;
2013-02-23 04:35:31 +04:00
	ret = ensure_zone_is_initialized(zone, phys_start_pfn, nr_pages);
	if (ret)
		return ret;
2008-05-15 03:05:52 +04:00
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
			phys_start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
2007-01-11 10:15:30 +03:00
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
2006-06-23 13:03:10 +04:00
	return 0;
2005-10-30 04:16:54 +03:00
}
2009-01-07 01:39:14 +03:00
static int __meminit __add_section(int nid, struct zone *zone,
				   unsigned long phys_start_pfn)
2005-10-30 04:16:54 +03:00
{
	int ret;
2006-08-05 23:15:06 +04:00
	if (pfn_valid(phys_start_pfn))
		return -EEXIST;
2013-11-13 03:07:42 +04:00
	ret = sparse_add_one_section(zone, phys_start_pfn);
2005-10-30 04:16:54 +03:00
	if (ret < 0)
		return ret;
2006-06-23 13:03:10 +04:00
	ret = __add_zone(zone, phys_start_pfn);
	if (ret < 0)
		return ret;
2009-01-07 01:39:14 +03:00
	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
2005-10-30 04:16:54 +03:00
}
2013-04-30 02:08:22 +04:00
/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
		      unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* during mem_map initialization, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);
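The loop above rounds the hot-added range out to whole sections. Below is a
small self-contained sketch of that rounding, using an assumed
PFN_SECTION_SHIFT of 15 (e.g. x86_64 with 4K pages and 128M sections); the
real constant is per-architecture, and the pfn values are hypothetical.

#include <stdio.h>

/* Assumed value for illustration; the kernel derives this per-arch. */
#define PFN_SECTION_SHIFT 15
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

int main(void)
{
	unsigned long phys_start_pfn = 0x240000;              /* hypothetical */
	unsigned long nr_pages = PAGES_PER_SECTION + 0x4000;  /* 1.5 sections */

	unsigned long start_sec = phys_start_pfn >> PFN_SECTION_SHIFT;
	unsigned long end_sec = (phys_start_pfn + nr_pages - 1) >> PFN_SECTION_SHIFT;

	/* like __add_pages(): a partially covered section is still
	 * added as a whole section */
	for (unsigned long i = start_sec; i <= end_sec; i++)
		printf("add section %lu (pfn %#lx)\n", i, i << PFN_SECTION_SHIFT);
	return 0;
}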
#ifdef CONFIG_MEMORY_HOTREMOVE
2013-02-23 04:33:12 +04:00
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static int find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static int find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
2013-09-12 01:21:44 +04:00
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
2013-02-23 04:33:12 +04:00
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we need
		 * to shrink zone->zone_start_pfn and zone->spanned_pages.
		 * In this case, we find the second-smallest valid mem_section
		 * and shrink the zone to start there.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we need
		 * to shrink zone->spanned_pages.
		 * In this case, we find the second-biggest valid mem_section
		 * and shrink the zone to end there.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the zone, it only creates a hole in the zone, so we need not
	 * change the zone itself. But the zone may now consist only of holes,
	 * so check whether any valid section remains.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* skip the section currently being removed */
		if (start_pfn == pfn)
			continue;

		/* we found a valid section, so there is nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* the zone has no valid section left */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}
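A userspace sketch (not kernel code, made-up pfn values) of the first
shrink_zone_span() case, where the removed section sits at the low end of the
zone. In the kernel the new start comes from find_smallest_section_pfn(),
which may skip further ahead if invalid sections intervene; here the next
section is assumed valid.

#include <assert.h>

int main(void)
{
	/* zone spans three 0x8000-page sections: [0x10000, 0x28000) */
	unsigned long zone_start_pfn = 0x10000, spanned = 0x18000;
	unsigned long zone_end = zone_start_pfn + spanned;

	/* remove the first section: [0x10000, 0x18000) */
	unsigned long start_pfn = 0x10000, end_pfn = 0x18000;

	if (zone_start_pfn == start_pfn) {
		/* the next remaining valid section becomes the new start */
		unsigned long pfn = end_pfn;
		zone_start_pfn = pfn;
		spanned = zone_end - pfn;
	}

	assert(zone_start_pfn == 0x18000 && spanned == 0x10000);
	return 0;
}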
static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
2013-11-13 03:07:19 +04:00
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
2013-02-23 04:33:12 +04:00
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we need
		 * to shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
		 * In this case, we find the second-smallest valid mem_section
		 * and shrink the pgdat to start there.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we need
		 * to shrink pgdat->node_spanned_pages.
		 * In this case, we find the second-biggest valid mem_section
		 * and shrink the pgdat to end there.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest mem_section
	 * in the pgdat, it only creates a hole in the pgdat, so we need not
	 * change the pgdat itself. But the pgdat may now consist only of
	 * holes, so check whether any valid section remains.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* skip the section currently being removed */
		if (start_pfn == pfn)
			continue;

		/* we found a valid section, so there is nothing to do */
		return;
	}

	/* the pgdat has no valid section left */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int zone_type;
	unsigned long flags;

	zone_type = zone - pgdat->node_zones;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}
2008-04-28 13:12:01 +04:00
static int __remove_section(struct zone *zone, struct mem_section *ms)
{
2013-02-23 04:33:12 +04:00
	unsigned long start_pfn;
	int scn_nr;
2008-04-28 13:12:01 +04:00
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;
2013-02-23 04:33:12 +04:00
	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn(scn_nr);
	__remove_zone(zone, start_pfn);
2008-04-28 13:12:01 +04:00
	sparse_remove_one_section(zone, ms);
	return 0;
}
/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be a multiple of section size)
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. The caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		   unsigned long nr_pages)
{
2013-04-30 02:08:20 +04:00
	unsigned long i;
2008-04-28 13:12:01 +04:00
	int sections_to_remove;
2013-04-30 02:08:20 +04:00
	resource_size_t start, size;
	int ret = 0;
2008-04-28 13:12:01 +04:00
	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);
2013-04-30 02:08:20 +04:00
	start = phys_start_pfn << PAGE_SHIFT;
	size = nr_pages * PAGE_SIZE;
	ret = release_mem_region_adjustable(&iomem_resource, start, size);
2013-05-25 02:55:30 +04:00
	if (ret) {
		resource_size_t endres = start + size - 1;

		pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
			&start, &endres, ret);
	}
2012-10-09 03:34:14 +04:00
2008-04-28 13:12:01 +04:00
	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i * PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn));
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(__remove_pages);
2013-04-30 02:08:22 +04:00
#endif /* CONFIG_MEMORY_HOTREMOVE */
2008-04-28 13:12:01 +04:00
2011-07-26 04:12:05 +04:00
int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	lock_memory_hotplug();

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	unlock_memory_hotplug();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);

void __online_page_set_limits(struct page *page)
2008-04-28 13:12:03 +04:00
{
2011-07-26 04:12:05 +04:00
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
2013-07-04 02:03:21 +04:00
	adjust_managed_page_count(page, 1);
2011-07-26 04:12:05 +04:00
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);
2008-04-28 13:12:03 +04:00
2011-07-26 04:12:05 +04:00
void __online_page_free(struct page *page)
{
2013-07-04 02:03:21 +04:00
	__free_reserved_page(page);
2008-04-28 13:12:03 +04:00
}
2011-07-26 04:12:05 +04:00
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}
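The set/restore pair above lets a single driver divert freshly onlined pages
to itself instead of handing them to the page allocator (in-tree, the Xen
balloon driver uses this hook). A hedged sketch of the pattern follows;
my_online_page and the module hooks are illustrative, not part of this file.

#include <linux/init.h>
#include <linux/memory_hotplug.h>	/* set/restore_online_page_callback() */
#include <linux/mm.h>

/* Sketch only: claim onlined pages instead of letting
 * generic_online_page() free them to the buddy allocator. */
static void my_online_page(struct page *page)	/* hypothetical */
{
	__online_page_set_limits(page);
	/* deliberately no __online_page_free(): a real driver would
	 * queue the page on its own list here instead of freeing it */
}

static int __init my_driver_init(void)
{
	/* fails with -EINVAL if another callback is already installed */
	return set_online_page_callback(&my_online_page);
}

static void __exit my_driver_exit(void)
{
	restore_online_page_callback(&my_online_page);
}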
2008-04-28 13:12:03 +04:00
2007-10-16 12:26:10 +04:00
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			      void *arg)
2005-10-30 04:16:54 +03:00
{
	unsigned long i;
2007-10-16 12:26:10 +04:00
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
2011-07-26 04:12:05 +04:00
			(*online_page_callback)(page);
2007-10-16 12:26:10 +04:00
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;

	return 0;
}
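online_pages_range() is invoked through walk_system_ram_range() with an
opaque pointer that doubles as an in/out accumulator. A self-contained
userspace sketch of that walker/accumulator pattern (all names and ranges
below are illustrative, not kernel APIs):

#include <stdio.h>

typedef int (*range_cb)(unsigned long start, unsigned long n, void *arg);

static int count_cb(unsigned long start, unsigned long n, void *arg)
{
	*(unsigned long *)arg += n;	/* accumulate like onlined_pages */
	return 0;
}

static int walk_ranges(const unsigned long (*r)[2], int nr,
		       range_cb cb, void *arg)
{
	for (int i = 0; i < nr; i++) {
		int ret = cb(r[i][0], r[i][1], arg);
		if (ret)
			return ret;	/* stop on first error, like the walker */
	}
	return 0;
}

int main(void)
{
	const unsigned long ranges[][2] = { {0x1000, 64}, {0x8000, 128} };
	unsigned long total = 0;

	walk_ranges(ranges, 2, count_cb, &total);
	printf("onlined %lu pages\n", total);	/* 192 */
	return 0;
}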
2012-12-13 01:52:04 +04:00
#ifdef CONFIG_MOVABLE_NODE
2012-12-19 02:23:24 +04:00
/*
 * When CONFIG_MOVABLE_NODE is set, we permit onlining of a node which doesn't
 * have normal memory.
 */
2012-12-13 01:52:04 +04:00
static bool can_online_high_movable(struct zone *zone)
{
	return true;
}
2012-12-19 02:23:24 +04:00
#else /* CONFIG_MOVABLE_NODE */
2012-12-12 04:03:23 +04:00
/* ensure every online node has NORMAL memory */
static bool can_online_high_movable(struct zone *zone)
{
	return node_state(zone_to_nid(zone), N_NORMAL_MEMORY);
}
2012-12-19 02:23:24 +04:00
#endif /* CONFIG_MOVABLE_NODE */
2012-12-12 04:03:23 +04:00
2012-12-12 04:01:03 +04:00
/* check which states of node_states will be changed when we online memory */
static void node_states_check_changes_online(unsigned long nr_pages,
					     struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
2012-12-13 01:51:49 +04:00
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * so set zone_last to ZONE_NORMAL.
2012-12-12 04:01:03 +04:00
	 *
2012-12-13 01:51:49 +04:00
	 * If we have neither HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, so set zone_last to ZONE_MOVABLE.
2012-12-12 04:01:03 +04:00
	 */
2012-12-13 01:51:49 +04:00
	if (N_MEMORY == N_NORMAL_MEMORY)
2012-12-12 04:01:03 +04:00
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is onlined.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;
2012-12-13 01:51:49 +04:00
#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * so set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * so set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif
2012-12-12 04:01:03 +04:00
	/*
	 * if the node doesn't have memory before onlining, we will need to
2012-12-13 01:51:49 +04:00
	 * set the node to node_states[N_MEMORY] after the memory
2012-12-12 04:01:03 +04:00
	 * is onlined.
	 */
2012-12-13 01:51:49 +04:00
	if (!node_state(nid, N_MEMORY))
2012-12-12 04:01:03 +04:00
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);
2012-12-13 01:51:49 +04:00
	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
2012-12-12 04:01:03 +04:00
}
2007-10-16 12:26:10 +04:00
2012-12-12 04:03:16 +04:00
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
2007-10-16 12:26:10 +04:00
{
2013-07-04 02:02:10 +04:00
	unsigned long flags;
2005-10-30 04:16:54 +03:00
	unsigned long onlined_pages = 0;
	struct zone *zone;
2006-06-23 13:03:11 +04:00
	int need_zonelists_rebuild = 0;
2007-10-22 03:41:36 +04:00
	int nid;
	int ret;
	struct memory_notify arg;
2011-01-11 10:44:01 +03:00
	lock_memory_hotplug();
2012-12-12 04:01:03 +04:00
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_mutex.
	 */
	zone = page_zone(pfn_to_page(pfn));
2012-12-12 04:03:23 +04:00
	if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) &&
	    !can_online_high_movable(zone)) {
		unlock_memory_hotplug();
2013-07-09 03:00:41 +04:00
		return -EINVAL;
2012-12-12 04:03:23 +04:00
	}
2012-12-12 04:03:16 +04:00
	if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) {
		if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
2013-07-09 03:00:41 +04:00
			return -EINVAL;
2012-12-12 04:03:16 +04:00
		}
	}
	if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) {
		if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) {
			unlock_memory_hotplug();
2013-07-09 03:00:41 +04:00
			return -EINVAL;
2012-12-12 04:03:16 +04:00
		}
	}

	/* the previous code may have changed the zone of the pfn range */
	zone = page_zone(pfn_to_page(pfn));
2007-10-22 03:41:36 +04:00
	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
2012-12-12 04:01:03 +04:00
	node_states_check_changes_online(nr_pages, zone, &arg);
2007-10-22 03:41:36 +04:00
2013-11-13 03:07:21 +04:00
	nid = pfn_to_nid(pfn);
2005-10-30 04:16:54 +03:00
2007-10-22 03:41:36 +04:00
	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
2011-01-11 10:44:01 +03:00
		unlock_memory_hotplug();
2007-10-22 03:41:36 +04:00
		return ret;
	}
2006-06-23 13:03:11 +04:00
	/*
	 * If this zone is not populated, then it is not in the zonelist.
	 * This means the page allocator ignores this zone.
	 * So, the zonelist must be updated after online.
	 */
2010-05-25 01:32:52 +04:00
	mutex_lock(&zonelists_mutex);
2012-12-12 04:01:01 +04:00
	if (!populated_zone(zone)) {
2006-06-23 13:03:11 +04:00
		need_zonelists_rebuild = 1;
2012-12-12 04:01:01 +04:00
		build_all_zonelists(NULL, zone);
	}
2006-06-23 13:03:11 +04:00
2009-09-23 03:45:46 +04:00
	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
2007-10-16 12:26:10 +04:00
		online_pages_range);
2008-05-15 03:05:50 +04:00
	if (ret) {
2012-12-12 04:01:01 +04:00
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
2010-05-25 01:32:52 +04:00
		mutex_unlock(&zonelists_mutex);
2012-05-30 02:06:30 +04:00
		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
		       (unsigned long long) pfn << PAGE_SHIFT,
		       (((unsigned long long) pfn + nr_pages)
			<< PAGE_SHIFT) - 1);
2008-05-15 03:05:50 +04:00
		memory_notify(MEM_CANCEL_ONLINE, &arg);
2011-01-11 10:44:01 +03:00
		unlock_memory_hotplug();
2008-05-15 03:05:50 +04:00
		return ret;
	}
2005-10-30 04:16:54 +03:00
	zone->present_pages += onlined_pages;
2013-07-04 02:02:10 +04:00
	pgdat_resize_lock(zone->zone_pgdat, &flags);
2006-03-10 04:33:51 +03:00
	zone->zone_pgdat->node_present_pages += onlined_pages;
2013-07-04 02:02:10 +04:00
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
2012-08-01 03:43:30 +04:00
	if (onlined_pages) {
2012-12-12 04:01:03 +04:00
		node_states_set_node(zone_to_nid(zone), &arg);
2012-08-01 03:43:30 +04:00
		if (need_zonelists_rebuild)
2012-12-12 04:01:01 +04:00
			build_all_zonelists(NULL, NULL);
2012-08-01 03:43:30 +04:00
		else
			zone_pcp_update(zone);
	}
2005-10-30 04:16:54 +03:00
2010-05-25 01:32:52 +04:00
	mutex_unlock(&zonelists_mutex);
2011-05-25 04:11:32 +04:00
	init_per_zone_wmark_min();
2012-08-01 03:43:30 +04:00
	if (onlined_pages)
2007-10-16 12:25:29 +04:00
		kswapd_run(zone_to_nid(zone));
2005-10-30 04:16:56 +03:00
2010-05-25 01:32:51 +04:00
	vm_total_pages = nr_free_pagecache_pages();
2008-07-24 08:28:18 +04:00
2006-09-29 13:01:25 +04:00
	writeback_set_ratelimit();
2007-10-22 03:41:36 +04:00
	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
2011-01-11 10:44:01 +03:00
	unlock_memory_hotplug();
2007-10-22 03:41:36 +04:00
2005-10-30 04:16:54 +03:00
	return 0;
}
2006-10-01 10:27:08 +04:00
# endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
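The "online_movable"/"online_kernel" policies described in the commit
message above are driven from user space through each memory block's
sysfs "state" attribute. As a hedged illustration only (this helper is
not part of this file, and the block number is hypothetical), a minimal
user-space sketch might look like:

#include <stdio.h>

/* Write an online policy to /sys/devices/system/memory/memoryN/state.
 * "online_movable" puts the block in ZONE_MOVABLE; "online_kernel" and
 * plain "online" select the other policies described above. */
static int online_block(int block, const char *policy)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/memory/memory%d/state", block);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(policy, f);
	fclose(f);
	return 0;
}

/* e.g. online_block(8, "online_movable"); */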
2006-06-27 13:53:30 +04:00
2009-11-18 01:06:18 +03:00
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref * hotadd_new_pgdat ( int nid , u64 start )
[PATCH] pgdat allocation for new node add (call pgdat allocation)
Add node-hot-add support to add_memory().
Node hotadd uses this sequence:
1. allocate pgdat.
2. refresh NODE_DATA()
3. call free_area_init_node() to initialize
4. create sysfs entry
5. add memory (old add_memory())
6. set node online
7. run kswapd for new node.
(8). update zonelist after pages are onlined. (This is already merged in
-mm because its update phase differs.)
Note:
To share as much common code as possible, there are two changes from v2:
- The old add_memory(), which was defined by each arch, is renamed to
arch_add_memory(). The new add_memory() becomes common code that calls
the arch-dependent function.
- This patch changes add_memory()'s interface
From: add_memory(start, end)
To : add_memory(nid, start, end).
Finding the node id from a physical address inside the old add_memory()
caused similar code in each arch. In addition, the acpi memory hotplug
driver can now find the node id more easily. In v2, it had to walk the
DSDT's _CRS, matching the physical address to get the handle of its
memory device and then the _PXM and node id, because the input was just
a physical address. In v3, the acpi driver can use the handle to get the
_PXM and node id for the new memory device, and can pass just the node
id to add_memory().
The fix to the arch_add_memory() interface is in the next patch.
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: "Brown, Len" <len.brown@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2006-06-27 13:53:34 +04:00
{
struct pglist_data * pgdat ;
unsigned long zones_size [ MAX_NR_ZONES ] = { 0 } ;
unsigned long zholes_size [ MAX_NR_ZONES ] = { 0 } ;
unsigned long start_pfn = start > > PAGE_SHIFT ;
2013-02-23 04:33:18 +04:00
pgdat = NODE_DATA ( nid ) ;
if ( ! pgdat ) {
pgdat = arch_alloc_nodedata ( nid ) ;
if ( ! pgdat )
return NULL ;
2006-06-27 13:53:34 +04:00
2013-02-23 04:33:18 +04:00
arch_refresh_nodedata ( nid , pgdat ) ;
}
2006-06-27 13:53:34 +04:00
/* we can use NODE_DATA(nid) from here */
/* init node's zones as empty zones, we don't have any present pages. */
2008-07-24 08:27:20 +04:00
free_area_init_node ( nid , zones_size , start_pfn , zholes_size ) ;
2006-06-27 13:53:34 +04:00
2011-06-16 02:08:38 +04:00
/*
 * The node we allocated has no zone fallback lists. To avoid
 * accessing a not-initialized zonelist, build one here.
 */
2011-06-23 05:13:04 +04:00
mutex_lock ( & zonelists_mutex ) ;
2012-08-01 03:43:28 +04:00
build_all_zonelists ( pgdat , NULL ) ;
2011-06-23 05:13:04 +04:00
mutex_unlock ( & zonelists_mutex ) ;
2011-06-16 02:08:38 +04:00
2006-06-27 13:53:34 +04:00
return pgdat ;
}
static void rollback_node_hotadd ( int nid , pg_data_t * pgdat )
{
arch_refresh_nodedata ( nid , NULL ) ;
arch_free_nodedata ( pgdat ) ;
return ;
}
2006-06-27 13:53:35 +04:00
2013-11-13 03:07:25 +04:00
/**
* try_online_node - online a node if offlined
*
2010-05-25 01:32:41 +04:00
* Called by cpu_up() to online a node without onlined memory.
*/
2013-11-13 03:07:25 +04:00
int try_online_node ( int nid )
2010-05-25 01:32:41 +04:00
{
pg_data_t * pgdat ;
int ret ;
2013-11-13 03:07:25 +04:00
if ( node_online ( nid ) )
return 0 ;
2010-12-03 01:31:19 +03:00
lock_memory_hotplug ( ) ;
2010-05-25 01:32:41 +04:00
pgdat = hotadd_new_pgdat ( nid , 0 ) ;
2011-06-23 05:13:01 +04:00
if ( ! pgdat ) {
2013-11-13 03:07:25 +04:00
pr_err("Cannot online node %d due to NULL pgdat\n", nid);
2010-05-25 01:32:41 +04:00
ret = - ENOMEM ;
goto out ;
}
node_set_online ( nid ) ;
ret = register_one_node ( nid ) ;
BUG_ON ( ret ) ;
2013-11-13 03:07:25 +04:00
if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
	mutex_lock(&zonelists_mutex);
	build_all_zonelists(NULL, NULL);
	mutex_unlock(&zonelists_mutex);
}
2010-05-25 01:32:41 +04:00
out :
2010-12-03 01:31:19 +03:00
unlock_memory_hotplug ( ) ;
2010-05-25 01:32:41 +04:00
return ret ;
}
2013-09-12 01:21:49 +04:00
static int check_hotplug_memory_range ( u64 start , u64 size )
{
u64 start_pfn = start >> PAGE_SHIFT;
u64 nr_pages = size >> PAGE_SHIFT;
/* Memory range must be aligned with section */
if ((start_pfn & ~PAGE_SECTION_MASK) ||
    (nr_pages % PAGES_PER_SECTION) || (!nr_pages)) {
	pr_err("Section-unaligned hotplug range: start 0x%llx, size 0x%llx\n",
	       (unsigned long long)start,
	       (unsigned long long)size);
	return -EINVAL;
}
return 0 ;
}
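To make the alignment rule concrete: on x86_64 with 4KiB pages a memory
section is commonly 128MiB (PAGES_PER_SECTION = 32768), though that size
is an architecture assumption, not something this file defines. A hedged,
stand-alone user-space sketch of the same check:

#include <stdio.h>

#define SECTION_SIZE	(128ULL << 20)	/* assumed 128MiB sections */

/* Mirrors check_hotplug_memory_range(): both the start address and the
 * size must be whole multiples of the section size, and size != 0. */
static int range_is_section_aligned(unsigned long long start,
				    unsigned long long size)
{
	return size && !(start % SECTION_SIZE) && !(size % SECTION_SIZE);
}

int main(void)
{
	/* 4GiB base + 128MiB: accepted; 4GiB base + 64MiB: rejected */
	printf("%d\n", range_is_section_aligned(0x100000000ULL, SECTION_SIZE));
	printf("%d\n", range_is_section_aligned(0x100000000ULL, 64ULL << 20));
	return 0;
}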
2008-11-22 20:33:24 +03:00
/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory ( int nid , u64 start , u64 size )
2006-06-27 13:53:30 +04:00
{
2006-06-27 13:53:34 +04:00
pg_data_t * pgdat = NULL ;
2013-02-23 04:33:18 +04:00
bool new_pgdat ;
bool new_node ;
2006-08-05 23:15:06 +04:00
struct resource * res ;
2006-06-27 13:53:30 +04:00
int ret ;
2013-09-12 01:21:49 +04:00
ret = check_hotplug_memory_range ( start , size ) ;
if ( ret )
return ret ;
2010-12-03 01:31:19 +03:00
lock_memory_hotplug ( ) ;
2009-11-18 01:06:22 +03:00
2006-08-05 23:15:06 +04:00
res = register_memory_resource ( start , size ) ;
2009-11-18 01:06:22 +03:00
ret = - EEXIST ;
2006-08-05 23:15:06 +04:00
if ( ! res )
2009-11-18 01:06:22 +03:00
goto out ;
2006-08-05 23:15:06 +04:00
2013-02-23 04:33:18 +04:00
{	/* Stupid hack to suppress address-never-null warning */
	void *p = NODE_DATA(nid);
	new_pgdat = !p;
}
new_node = ! node_online ( nid ) ;
if ( new_node ) {
2006-06-27 13:53:34 +04:00
pgdat = hotadd_new_pgdat ( nid , start ) ;
2009-11-18 01:06:22 +03:00
ret = - ENOMEM ;
2006-06-27 13:53:34 +04:00
if ( ! pgdat )
2012-07-12 01:02:31 +04:00
goto error ;
2006-06-27 13:53:34 +04:00
}
2006-06-27 13:53:30 +04:00
/* call arch's memory hotadd */
ret = arch_add_memory ( nid , start , size ) ;
2006-06-27 13:53:34 +04:00
if ( ret < 0 )
goto error ;
2006-06-27 13:53:38 +04:00
/* We online the node here; we can't roll back from here. */
2006-06-27 13:53:34 +04:00
node_set_online ( nid ) ;
2013-02-23 04:33:18 +04:00
if ( new_node ) {
2006-06-27 13:53:38 +04:00
ret = register_one_node ( nid ) ;
/*
 * If the sysfs file of the new node can't be created, cpus on
 * the node can't be hot-added. There is no way to roll back now,
 * so catch it with BUG_ON(), reluctantly.
 */
BUG_ON ( ret ) ;
}
2010-03-06 00:41:58 +03:00
/* create new memmap entry */
firmware_map_add_hotplug ( start , start + size , " System RAM " ) ;
2009-11-18 01:06:22 +03:00
goto out ;
2006-06-27 13:53:34 +04:00
error :
/* rollback pgdat allocation and others */
if ( new_pgdat )
rollback_node_hotadd ( nid , pgdat ) ;
2013-02-23 04:32:48 +04:00
release_memory_resource ( res ) ;
2006-06-27 13:53:34 +04:00
2009-11-18 01:06:22 +03:00
out :
2010-12-03 01:31:19 +03:00
unlock_memory_hotplug ( ) ;
2006-06-27 13:53:30 +04:00
return ret ;
}
EXPORT_SYMBOL_GPL ( add_memory ) ;
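Since add_memory() is the exported entry point, a hedged sketch of how a
hotplug driver might use it follows. The node id, base address, and size
are hypothetical, chosen only to satisfy check_hotplug_memory_range()
under the common (assumed) 128MiB section size:

/* Hedged caller sketch, not from this file: hot-add one 128MiB,
 * section-aligned range to node 0. add_memory() takes the memory
 * hotplug lock internally, so no extra locking is needed here. */
static int example_hotadd(void)
{
	u64 start = 0x100000000ULL;	/* hypothetical physical base (4GiB) */
	u64 size = 128ULL << 20;	/* one section on x86_64 (assumed) */

	return add_memory(0, start, size);
}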
2007-10-16 12:26:12 +04:00
# ifdef CONFIG_MEMORY_HOTREMOVE
2008-07-24 08:28:19 +04:00
/*
 * A free page on the buddy free lists (not the per-cpu lists) has
 * PageBuddy set and the size of the free page is given by page_order().
 * Using this, the function determines if the pageblock contains only
 * free pages. Due to buddy constraints, a free page at least the size
 * of a pageblock will be located at the start of the pageblock.
 */
static inline int pageblock_free ( struct page * page )
{
return PageBuddy ( page ) & & page_order ( page ) > = pageblock_order ;
}
/* Return the start of the next active pageblock after a given page */
static struct page * next_active_pageblock ( struct page * page )
{
/* Ensure the starting page is pageblock-aligned */
BUG_ON ( page_to_pfn ( page ) & ( pageblock_nr_pages - 1 ) ) ;
/* If the entire pageblock is free, move to the end of the free page */
2010-09-10 03:38:01 +04:00
if ( pageblock_free ( page ) ) {
int order ;
/* Be careful: we don't hold locks, so page_order() can change. */
order = page_order ( page ) ;
if ( ( order < MAX_ORDER ) & & ( order > = pageblock_order ) )
return page + ( 1 < < order ) ;
}
2008-07-24 08:28:19 +04:00
2010-09-10 03:38:01 +04:00
return page + pageblock_nr_pages ;
2008-07-24 08:28:19 +04:00
}
/* Checks if this range of memory is likely to be hot-removable. */
int is_mem_section_removable ( unsigned long start_pfn , unsigned long nr_pages )
{
struct page * page = pfn_to_page ( start_pfn ) ;
struct page * end_page = page + nr_pages ;
/* Check the starting page of each pageblock within the range */
for ( ; page < end_page ; page = next_active_pageblock ( page ) ) {
2010-10-27 01:21:30 +04:00
if ( ! is_pageblock_removable_nolock ( page ) )
2008-07-24 08:28:19 +04:00
return 0 ;
2010-10-27 01:21:30 +04:00
cond_resched ( ) ;
2008-07-24 08:28:19 +04:00
}
/* All pageblocks in the memory block are likely to be hot-removable */
return 1 ;
}
2007-10-16 12:26:12 +04:00
/*
 * Confirm that all pages in the range [start_pfn, end_pfn) belong to
 * the same zone.
 */
static int test_pages_in_a_zone ( unsigned long start_pfn , unsigned long end_pfn )
{
unsigned long pfn ;
struct zone * zone = NULL ;
struct page * page ;
int i ;
for (pfn = start_pfn;
     pfn < end_pfn;
     pfn += MAX_ORDER_NR_PAGES) {
	i = 0;
	/* This is just a CONFIG_HOLES_IN_ZONE check. */
	while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
		i++;
	if (i == MAX_ORDER_NR_PAGES)
		continue;
	page = pfn_to_page(pfn + i);
	if (zone && page_zone(page) != zone)
		return 0;
	zone = page_zone(page);
}
return 1 ;
}
/*
2013-09-12 01:22:09 +04:00
* Scan the pfn range [start, end) to find movable/migratable pages (LRU
* pages and hugepages). We scan by pfn because it's much easier than
* scanning over a linked list. This function returns the pfn of the
* first movable page if one is found, otherwise 0.
2007-10-16 12:26:12 +04:00
*/
2013-09-12 01:22:09 +04:00
static unsigned long scan_movable_pages ( unsigned long start , unsigned long end )
2007-10-16 12:26:12 +04:00
{
unsigned long pfn ;
struct page * page ;
for ( pfn = start ; pfn < end ; pfn + + ) {
if ( pfn_valid ( pfn ) ) {
page = pfn_to_page ( pfn ) ;
if ( PageLRU ( page ) )
return pfn ;
2013-09-12 01:22:09 +04:00
if ( PageHuge ( page ) ) {
if ( is_hugepage_active ( page ) )
return pfn ;
else
pfn = round_up ( pfn + 1 ,
1 < < compound_order ( page ) ) - 1 ;
}
2007-10-16 12:26:12 +04:00
}
}
return 0 ;
}
# define NR_OFFLINE_AT_ONCE_PAGES (256)
static int
do_migrate_range ( unsigned long start_pfn , unsigned long end_pfn )
{
unsigned long pfn ;
struct page * page ;
int move_pages = NR_OFFLINE_AT_ONCE_PAGES ;
int not_managed = 0 ;
int ret = 0 ;
LIST_HEAD ( source ) ;
for ( pfn = start_pfn ; pfn < end_pfn & & move_pages > 0 ; pfn + + ) {
if ( ! pfn_valid ( pfn ) )
continue ;
page = pfn_to_page ( pfn ) ;
2013-09-12 01:22:09 +04:00
if ( PageHuge ( page ) ) {
struct page * head = compound_head ( page ) ;
pfn = page_to_pfn ( head ) + ( 1 < < compound_order ( head ) ) - 1 ;
if ( compound_order ( head ) > PFN_SECTION_SHIFT ) {
ret = - EBUSY ;
break ;
}
if ( isolate_huge_page ( page , & source ) )
move_pages - = 1 < < compound_order ( head ) ;
continue ;
}
2011-05-25 04:12:19 +04:00
if ( ! get_page_unless_zero ( page ) )
2007-10-16 12:26:12 +04:00
continue ;
/*
 * We can skip free pages, and we can only deal with pages on
 * the LRU.
 */
vmscan: move isolate_lru_page() to vmscan.c
On large memory systems, the VM can spend way too much time scanning
through pages that it cannot (or should not) evict from memory. Not only
does it use up CPU time, but it also provokes lock contention and can
leave large systems under memory pressure in a catatonic state.
This patch series improves VM scalability by:
1) putting filesystem backed, swap backed and unevictable pages
onto their own LRUs, so the system only scans the pages that it
can/should evict from memory
2) switching to two handed clock replacement for the anonymous LRUs,
so the number of pages that need to be scanned when the system
starts swapping is bound to a reasonable number
3) keeping unevictable pages off the LRU completely, so the
VM does not waste CPU time scanning them. ramfs, ramdisk,
SHM_LOCKED shared memory segments and mlock()ed VMA pages
are kept on the unevictable list.
This patch:
isolate_lru_page() logically belongs in vmscan.c rather than migrate.c.
It is a tough call, because we don't need that function without memory
migration, so there is a valid argument for keeping it in migrate.c.
However, a subsequent patch needs to make use of it in the core mm, so
we can happily move it to vmscan.c.
Also, make the function a little more generic by not requiring that it
add an isolated page to a given list. Callers can do that.
Note that we now have '__isolate_lru_page()', which does
something quite different and is visible outside of vmscan.c
for use with the memory controller. Methinks we need to
rationalize these names/purposes. --lts
[akpm@linux-foundation.org: fix mm/memory_hotplug.c build]
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2008-10-19 07:26:09 +04:00
ret = isolate_lru_page ( page ) ;
2007-10-16 12:26:12 +04:00
if ( ! ret ) { /* Success */
2011-05-25 04:12:19 +04:00
put_page ( page ) ;
2008-10-19 07:26:09 +04:00
list_add_tail ( & page - > lru , & source ) ;
2007-10-16 12:26:12 +04:00
move_pages - - ;
2009-12-15 04:58:11 +03:00
inc_zone_page_state ( page , NR_ISOLATED_ANON +
page_is_file_cache ( page ) ) ;
2007-10-16 12:26:12 +04:00
} else {
# ifdef CONFIG_DEBUG_VM
2010-03-11 02:20:43 +03:00
printk(KERN_ALERT "removing pfn %lx from LRU failed\n", pfn);
dump_page(page);
2007-10-16 12:26:12 +04:00
# endif
2011-05-25 04:12:19 +04:00
put_page ( page ) ;
2011-03-31 05:57:33 +04:00
/* Because we don't have the big zone->lock, we should
2010-10-27 01:22:10 +04:00
   check this again here. */
if ( page_count ( page ) ) {
not_managed + + ;
2010-10-27 01:22:10 +04:00
ret = - EBUSY ;
2010-10-27 01:22:10 +04:00
break ;
}
2007-10-16 12:26:12 +04:00
}
}
2010-10-27 01:22:10 +04:00
if ( ! list_empty ( & source ) ) {
if ( not_managed ) {
2013-09-12 01:22:09 +04:00
putback_movable_pages ( & source ) ;
2010-10-27 01:22:10 +04:00
goto out ;
}
2012-10-09 03:32:54 +04:00
/*
 * alloc_migrate_target() should be improved!
 * migrate_pages() returns the number of failed pages.
 */
ret = migrate_pages ( & source , alloc_migrate_target , 0 ,
2013-02-23 04:35:14 +04:00
MIGRATE_SYNC , MR_MEMORY_HOTPLUG ) ;
2010-10-27 01:22:10 +04:00
if ( ret )
2013-09-12 01:22:09 +04:00
putback_movable_pages ( & source ) ;
2007-10-16 12:26:12 +04:00
}
out :
return ret ;
}
/*
 * Remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb ( unsigned long start , unsigned long nr_pages ,
void * data )
{
__offline_isolated_pages ( start , start + nr_pages ) ;
return 0 ;
}
static void
offline_isolated_pages ( unsigned long start_pfn , unsigned long end_pfn )
{
2009-09-23 03:45:46 +04:00
walk_system_ram_range ( start_pfn , end_pfn - start_pfn , NULL ,
2007-10-16 12:26:12 +04:00
offline_isolated_pages_cb ) ;
}
/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb ( unsigned long start_pfn , unsigned long nr_pages ,
void * data )
{
int ret ;
long offlined = * ( long * ) data ;
2012-12-12 04:00:45 +04:00
ret = test_pages_isolated ( start_pfn , start_pfn + nr_pages , true ) ;
2007-10-16 12:26:12 +04:00
offlined = nr_pages ;
if ( ! ret )
* ( long * ) data + = offlined ;
return ret ;
}
static long
check_pages_isolated ( unsigned long start_pfn , unsigned long end_pfn )
{
long offlined = 0 ;
int ret ;
2009-09-23 03:45:46 +04:00
ret = walk_system_ram_range ( start_pfn , end_pfn - start_pfn , & offlined ,
2007-10-16 12:26:12 +04:00
check_pages_isolated_cb ) ;
if ( ret < 0 )
offlined = ( long ) ret ;
return offlined ;
}
2012-12-13 01:52:04 +04:00
# ifdef CONFIG_MOVABLE_NODE
2012-12-19 02:23:24 +04:00
/*
 * With CONFIG_MOVABLE_NODE, we permit offlining of a node which
 * doesn't have normal memory.
 */
2012-12-13 01:52:04 +04:00
static bool can_offline_normal ( struct zone * zone , unsigned long nr_pages )
{
return true ;
}
2012-12-19 02:23:24 +04:00
# else /* CONFIG_MOVABLE_NODE */
2012-12-12 04:03:23 +04:00
/* ensure the node has NORMAL memory if it is still online */
static bool can_offline_normal ( struct zone * zone , unsigned long nr_pages )
{
struct pglist_data * pgdat = zone - > zone_pgdat ;
unsigned long present_pages = 0 ;
enum zone_type zt ;
for ( zt = 0 ; zt < = ZONE_NORMAL ; zt + + )
present_pages + = pgdat - > node_zones [ zt ] . present_pages ;
if ( present_pages > nr_pages )
return true ;
present_pages = 0 ;
for ( ; zt < = ZONE_MOVABLE ; zt + + )
present_pages + = pgdat - > node_zones [ zt ] . present_pages ;
/*
 * We can't offline the last normal memory until all
 * higher memory is offlined.
 */
return present_pages = = 0 ;
}
2012-12-19 02:23:24 +04:00
# endif /* CONFIG_MOVABLE_NODE */
2012-12-12 04:03:23 +04:00
2013-11-13 03:08:10 +04:00
static int __init cmdline_parse_movable_node ( char * p )
{
# ifdef CONFIG_MOVABLE_NODE
/*
 * Memory used by the kernel cannot be hot-removed because Linux
 * cannot migrate the kernel pages. When memory hotplug is
 * enabled, we should prevent memblock from allocating memory
 * for the kernel.
 *
 * ACPI SRAT records all hotpluggable memory ranges. But before
 * SRAT is parsed, we don't know about it.
 *
 * The kernel image is loaded into memory very early. We
 * cannot prevent this anyway. So on NUMA systems, we set any
 * node the kernel resides in as un-hotpluggable.
 *
 * Since on modern servers one node can have double-digit
 * gigabytes of memory, we can assume the memory around the kernel
 * image is also un-hotpluggable. So before SRAT is parsed, just
 * allocate memory near the kernel image to try our best to keep
 * the kernel away from hotpluggable memory.
 */
memblock_set_bottom_up ( true ) ;
# else
pr_warn("movable_node option not supported\n");
# endif
return 0 ;
}
early_param ( " movable_node " , cmdline_parse_movable_node ) ;
2012-12-12 04:01:03 +04:00
/* check which state of node_states will be changed when offline memory */
static void node_states_check_changes_offline ( unsigned long nr_pages ,
struct zone * zone , struct memory_notify * arg )
{
struct pglist_data * pgdat = zone - > zone_pgdat ;
unsigned long present_pages = 0 ;
enum zone_type zt , zone_last = ZONE_NORMAL ;
/*
2012-12-13 01:51:49 +04:00
* If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
* contains nodes which have zones of 0...ZONE_NORMAL,
* set zone_last to ZONE_NORMAL.
2012-12-12 04:01:03 +04:00
*
2012-12-13 01:51:49 +04:00
* If we don't have HIGHMEM nor movable node,
* node_states[N_NORMAL_MEMORY] contains nodes which have zones of
* 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
2012-12-12 04:01:03 +04:00
*/
2012-12-13 01:51:49 +04:00
if (N_MEMORY == N_NORMAL_MEMORY)
2012-12-12 04:01:03 +04:00
zone_last = ZONE_MOVABLE ;
/*
 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
 * If the memory to be offlined is in a zone of 0...zone_last,
 * and it is the last present memory there, 0...zone_last will
 * become empty after the offline, thus we can determine that we
 * will need to clear the node from node_states[N_NORMAL_MEMORY].
 */
for ( zt = 0 ; zt < = zone_last ; zt + + )
present_pages + = pgdat - > node_zones [ zt ] . present_pages ;
if ( zone_idx ( zone ) < = zone_last & & nr_pages > = present_pages )
arg - > status_change_nid_normal = zone_to_nid ( zone ) ;
else
arg - > status_change_nid_normal = - 1 ;
2012-12-13 01:51:49 +04:00
# ifdef CONFIG_HIGHMEM
/*
 * If we have movable node, node_states[N_HIGH_MEMORY]
 * contains nodes which have zones of 0...ZONE_HIGHMEM,
 * set zone_last to ZONE_HIGHMEM.
 *
 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
 * contains nodes which have zones of 0...ZONE_MOVABLE,
 * set zone_last to ZONE_MOVABLE.
 */
zone_last = ZONE_HIGHMEM ;
if (N_MEMORY == N_HIGH_MEMORY)
zone_last = ZONE_MOVABLE ;
for ( ; zt < = zone_last ; zt + + )
present_pages + = pgdat - > node_zones [ zt ] . present_pages ;
if ( zone_idx ( zone ) < = zone_last & & nr_pages > = present_pages )
arg - > status_change_nid_high = zone_to_nid ( zone ) ;
else
arg - > status_change_nid_high = - 1 ;
# else
arg - > status_change_nid_high = arg - > status_change_nid_normal ;
# endif
2012-12-12 04:01:03 +04:00
/*
 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE
 */
zone_last = ZONE_MOVABLE ;
/*
 * Check whether node_states[N_HIGH_MEMORY] will be changed.
 * If we try to offline the last present @nr_pages from the node,
 * we can determine that we will need to clear the node from
 * node_states[N_HIGH_MEMORY].
 */
for ( ; zt < = zone_last ; zt + + )
present_pages + = pgdat - > node_zones [ zt ] . present_pages ;
if ( nr_pages > = present_pages )
arg - > status_change_nid = zone_to_nid ( zone ) ;
else
arg - > status_change_nid = - 1 ;
}
static void node_states_clear_node ( int node , struct memory_notify * arg )
{
if ( arg - > status_change_nid_normal > = 0 )
node_clear_state ( node , N_NORMAL_MEMORY ) ;
2012-12-13 01:51:49 +04:00
if ((N_MEMORY != N_NORMAL_MEMORY) &&
    (arg->status_change_nid_high >= 0))
2012-12-12 04:01:03 +04:00
node_clear_state ( node , N_HIGH_MEMORY ) ;
2012-12-13 01:51:49 +04:00
if ((N_MEMORY != N_HIGH_MEMORY) &&
    (arg->status_change_nid >= 0))
node_clear_state ( node , N_MEMORY ) ;
2012-12-12 04:01:03 +04:00
}
2012-10-09 03:33:58 +04:00
static int __ref __offline_pages ( unsigned long start_pfn ,
2007-10-16 12:26:12 +04:00
unsigned long end_pfn , unsigned long timeout )
{
unsigned long pfn , nr_pages , expire ;
long offlined_pages ;
2007-10-22 03:41:36 +04:00
int ret , drain , retry_max , node ;
2013-07-04 02:02:11 +04:00
unsigned long flags ;
2007-10-16 12:26:12 +04:00
struct zone * zone ;
2007-10-22 03:41:36 +04:00
struct memory_notify arg ;
2007-10-16 12:26:12 +04:00
/* at least, alignment against pageblock is necessary */
if ( ! IS_ALIGNED ( start_pfn , pageblock_nr_pages ) )
return - EINVAL ;
if ( ! IS_ALIGNED ( end_pfn , pageblock_nr_pages ) )
return - EINVAL ;
/* This makes hotplug much easier...and readable.
   We assume this for now. */
if ( ! test_pages_in_a_zone ( start_pfn , end_pfn ) )
return - EINVAL ;
2007-10-22 03:41:36 +04:00
2010-12-03 01:31:19 +03:00
lock_memory_hotplug ( ) ;
2009-11-18 01:06:22 +03:00
2007-10-22 03:41:36 +04:00
zone = page_zone ( pfn_to_page ( start_pfn ) ) ;
node = zone_to_nid ( zone ) ;
nr_pages = end_pfn - start_pfn ;
2012-12-12 04:03:23 +04:00
ret = - EINVAL ;
if ( zone_idx ( zone ) < = ZONE_NORMAL & & ! can_offline_normal ( zone , nr_pages ) )
goto out ;
2007-10-16 12:26:12 +04:00
/* set above range as isolated */
2012-12-12 04:00:45 +04:00
ret = start_isolate_page_range ( start_pfn , end_pfn ,
MIGRATE_MOVABLE , true ) ;
2007-10-16 12:26:12 +04:00
if ( ret )
2009-11-18 01:06:22 +03:00
goto out ;
2007-10-22 03:41:36 +04:00
arg . start_pfn = start_pfn ;
arg . nr_pages = nr_pages ;
2012-12-12 04:01:03 +04:00
node_states_check_changes_offline ( nr_pages , zone , & arg ) ;
2007-10-22 03:41:36 +04:00
ret = memory_notify ( MEM_GOING_OFFLINE , & arg ) ;
ret = notifier_to_errno ( ret ) ;
if ( ret )
goto failed_removal ;
2007-10-16 12:26:12 +04:00
pfn = start_pfn ;
expire = jiffies + timeout ;
drain = 0 ;
retry_max = 5 ;
repeat :
/* start memory hot removal */
ret = - EAGAIN ;
if ( time_after ( jiffies , expire ) )
goto failed_removal ;
ret = - EINTR ;
if ( signal_pending ( current ) )
goto failed_removal ;
ret = 0 ;
if ( drain ) {
lru_add_drain_all ( ) ;
cond_resched ( ) ;
2008-02-05 09:29:11 +03:00
drain_all_pages ( ) ;
2007-10-16 12:26:12 +04:00
}
2013-09-12 01:22:09 +04:00
pfn = scan_movable_pages ( start_pfn , end_pfn ) ;
if ( pfn ) { /* We have movable pages */
2007-10-16 12:26:12 +04:00
ret = do_migrate_range ( pfn , end_pfn ) ;
if ( ! ret ) {
drain = 1 ;
goto repeat ;
} else {
if ( ret < 0 )
if ( - - retry_max = = 0 )
goto failed_removal ;
yield ( ) ;
drain = 1 ;
goto repeat ;
}
}
2012-09-20 05:48:02 +04:00
/* Drain all zones' lru pagevecs; this is asynchronous... */
2007-10-16 12:26:12 +04:00
lru_add_drain_all ( ) ;
yield ( ) ;
2012-09-20 05:48:02 +04:00
/* Drain pcp pages; this is synchronous. */
2008-02-05 09:29:11 +03:00
drain_all_pages ( ) ;
2013-09-12 01:22:09 +04:00
/*
 * Dissolve free hugepages in the memory block before actually
 * offlining, in order to keep hugetlbfs's object counting consistent.
 */
dissolve_free_huge_pages ( start_pfn , end_pfn ) ;
2007-10-16 12:26:12 +04:00
/* check again */
offlined_pages = check_pages_isolated ( start_pfn , end_pfn ) ;
if ( offlined_pages < 0 ) {
ret = - EBUSY ;
goto failed_removal ;
}
printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
2012-09-20 05:48:02 +04:00
/* Ok, all of our target is isolated.
2007-10-16 12:26:12 +04:00
   We cannot roll back at this point. */
offline_isolated_pages ( start_pfn , end_pfn ) ;
2007-11-15 03:59:12 +03:00
/* reset pagetype flags and make the migrate type MOVABLE */
2012-04-03 17:06:15 +04:00
undo_isolate_page_range ( start_pfn , end_pfn , MIGRATE_MOVABLE ) ;
2007-10-16 12:26:12 +04:00
/* removal success */
2013-07-04 02:03:21 +04:00
adjust_managed_page_count ( pfn_to_page ( start_pfn ) , - offlined_pages ) ;
2007-10-16 12:26:12 +04:00
zone - > present_pages - = offlined_pages ;
2013-07-04 02:02:11 +04:00
pgdat_resize_lock ( zone - > zone_pgdat , & flags ) ;
2007-10-16 12:26:12 +04:00
zone - > zone_pgdat - > node_present_pages - = offlined_pages ;
2013-07-04 02:02:11 +04:00
pgdat_resize_unlock ( zone - > zone_pgdat , & flags ) ;
2007-10-22 03:41:36 +04:00
2011-05-25 04:11:32 +04:00
init_per_zone_wmark_min ( ) ;
2012-10-09 03:31:51 +04:00
if ( ! populated_zone ( zone ) ) {
2012-08-01 03:43:32 +04:00
zone_pcp_reset ( zone ) ;
2012-10-09 03:31:51 +04:00
mutex_lock ( & zonelists_mutex ) ;
build_all_zonelists ( NULL , NULL ) ;
mutex_unlock ( & zonelists_mutex ) ;
} else
zone_pcp_update ( zone ) ;
2012-08-01 03:43:32 +04:00
2012-12-12 04:01:03 +04:00
node_states_clear_node ( node , & arg ) ;
if ( arg . status_change_nid > = 0 )
2009-12-15 04:58:33 +03:00
kswapd_stop ( node ) ;
2009-06-17 02:32:50 +04:00
2007-10-16 12:26:12 +04:00
vm_total_pages = nr_free_pagecache_pages ( ) ;
writeback_set_ratelimit ( ) ;
2007-10-22 03:41:36 +04:00
memory_notify ( MEM_OFFLINE , & arg ) ;
2010-12-03 01:31:19 +03:00
unlock_memory_hotplug ( ) ;
2007-10-16 12:26:12 +04:00
return 0 ;
failed_removal :
2012-05-30 02:06:30 +04:00
printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
       (unsigned long long) start_pfn << PAGE_SHIFT,
       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
2007-10-22 03:41:36 +04:00
memory_notify ( MEM_CANCEL_OFFLINE , & arg ) ;
2007-10-16 12:26:12 +04:00
/* pushback to free area */
2012-04-03 17:06:15 +04:00
undo_isolate_page_range ( start_pfn , end_pfn , MIGRATE_MOVABLE ) ;
2007-10-22 03:41:36 +04:00
2009-11-18 01:06:22 +03:00
out :
2010-12-03 01:31:19 +03:00
unlock_memory_hotplug ( ) ;
2007-10-16 12:26:12 +04:00
return ret ;
}
2008-10-19 07:25:58 +04:00
2012-10-09 03:33:58 +04:00
int offline_pages ( unsigned long start_pfn , unsigned long nr_pages )
{
return __offline_pages ( start_pfn , start_pfn + nr_pages , 120 * HZ ) ;
}
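A hedged usage sketch of this wrapper follows. The pfn and page count are
hypothetical: 32768 pages is one 128MiB section on x86_64 with 4KiB pages
(an assumption), and the range must satisfy the pageblock alignment
checks in __offline_pages().

/* Hedged caller sketch, not from this file: offline one section's
 * worth of pages with the default 120s timeout applied above. */
static int example_offline_section(unsigned long start_pfn)
{
	return offline_pages(start_pfn, 32768);
}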
2013-05-08 02:29:49 +04:00
# endif /* CONFIG_MEMORY_HOTREMOVE */
2012-10-09 03:33:58 +04:00
2013-02-23 04:32:54 +04:00
/**
 * walk_memory_range - walk through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
2013-04-30 02:06:16 +04:00
 * @end_pfn: end pfn of the memory range
2013-02-23 04:32:54 +04:00
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in the range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
2013-05-08 02:29:49 +04:00
int walk_memory_range ( unsigned long start_pfn , unsigned long end_pfn ,
2013-02-23 04:32:54 +04:00
void * arg , int ( * func ) ( struct memory_block * , void * ) )
2008-10-19 07:25:58 +04:00
{
2012-10-09 03:34:01 +04:00
struct memory_block * mem = NULL ;
struct mem_section * section ;
unsigned long pfn , section_nr ;
int ret ;
for ( pfn = start_pfn ; pfn < end_pfn ; pfn + = PAGES_PER_SECTION ) {
section_nr = pfn_to_section_nr ( pfn ) ;
if ( ! present_section_nr ( section_nr ) )
continue ;
section = __nr_to_section ( section_nr ) ;
/* same memblock? */
if ( mem )
if ( ( section_nr > = mem - > start_section_nr ) & &
( section_nr < = mem - > end_section_nr ) )
continue ;
mem = find_memory_block_hinted ( section , mem ) ;
if ( ! mem )
continue ;
2013-02-23 04:32:54 +04:00
ret = func ( mem , arg ) ;
2012-10-09 03:34:01 +04:00
if ( ret ) {
2013-02-23 04:32:54 +04:00
kobject_put ( & mem - > dev . kobj ) ;
return ret ;
2012-10-09 03:34:01 +04:00
}
}
if ( mem )
kobject_put ( & mem - > dev . kobj ) ;
2013-02-23 04:32:54 +04:00
return 0 ;
}
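The callback contract above is easiest to see with a small example. This
is a hedged sketch only: the helper names are hypothetical, though the
memory_block state field and the MEM_ONLINE constant come from
<linux/memory.h>.

/* Count how many memory blocks in a pfn range are still online.
 * Returning 0 from the callback keeps the walk going; a non-zero
 * return would stop it and be propagated to the caller. */
static int count_online_cb(struct memory_block *mem, void *arg)
{
	if (mem->state == MEM_ONLINE)
		(*(int *)arg)++;
	return 0;
}

static int count_online_blocks(unsigned long start_pfn,
			       unsigned long end_pfn)
{
	int online = 0;

	walk_memory_range(start_pfn, end_pfn, &online, count_online_cb);
	return online;
}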
2013-05-08 02:29:49 +04:00
# ifdef CONFIG_MEMORY_HOTREMOVE
2013-11-13 03:07:20 +04:00
static int check_memblock_offlined_cb ( struct memory_block * mem , void * arg )
2013-02-23 04:32:54 +04:00
{
int ret = ! is_memblock_offlined ( mem ) ;
2013-04-30 02:08:49 +04:00
if ( unlikely ( ret ) ) {
phys_addr_t beginpa , endpa ;
beginpa = PFN_PHYS ( section_nr_to_pfn ( mem - > start_section_nr ) ) ;
endpa = PFN_PHYS ( section_nr_to_pfn ( mem - > end_section_nr + 1 ) ) - 1 ;
2013-02-23 04:32:54 +04:00
pr_warn("removing memory fails, because memory "
2013-04-30 02:08:49 +04:00
	"[%pa-%pa] is onlined\n",
	&beginpa, &endpa);
}
2013-02-23 04:32:54 +04:00
return ret ;
}
2013-09-12 01:21:50 +04:00
static int check_cpu_on_node ( pg_data_t * pgdat )
2013-02-23 04:33:14 +04:00
{
int cpu ;
for_each_present_cpu ( cpu ) {
if (cpu_to_node(cpu) == pgdat->node_id)
	/*
	 * The cpu on this node isn't removed, and we can't
	 * offline this node.
	 */
	return -EBUSY;
}
return 0 ;
}
2013-09-12 01:21:50 +04:00
static void unmap_cpu_on_node ( pg_data_t * pgdat )
2013-02-23 04:33:31 +04:00
{
# ifdef CONFIG_ACPI_NUMA
int cpu ;
for_each_possible_cpu ( cpu )
if (cpu_to_node(cpu) == pgdat->node_id)
	numa_clear_node(cpu);
# endif
}
2013-09-12 01:21:50 +04:00
static int check_and_unmap_cpu_on_node ( pg_data_t * pgdat )
2013-02-23 04:33:31 +04:00
{
2013-09-12 01:21:50 +04:00
int ret ;
2013-02-23 04:33:31 +04:00
2013-09-12 01:21:50 +04:00
ret = check_cpu_on_node ( pgdat ) ;
2013-02-23 04:33:31 +04:00
if ( ret )
return ret ;
/*
 * The node will be offlined when we come here, so we can clear
 * the cpu_to_node() mapping now.
 */
2013-09-12 01:21:50 +04:00
unmap_cpu_on_node ( pgdat ) ;
2013-02-23 04:33:31 +04:00
return 0 ;
}
2013-09-12 01:21:50 +04:00
/**
 * try_offline_node
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
2013-02-23 04:33:27 +04:00
void try_offline_node ( int nid )
2013-02-23 04:33:14 +04:00
{
2013-02-23 04:33:16 +04:00
pg_data_t * pgdat = NODE_DATA ( nid ) ;
unsigned long start_pfn = pgdat - > node_start_pfn ;
unsigned long end_pfn = start_pfn + pgdat - > node_spanned_pages ;
2013-02-23 04:33:14 +04:00
unsigned long pfn ;
2013-02-23 04:33:16 +04:00
struct page * pgdat_page = virt_to_page ( pgdat ) ;
int i ;
2013-02-23 04:33:14 +04:00
for ( pfn = start_pfn ; pfn < end_pfn ; pfn + = PAGES_PER_SECTION ) {
unsigned long section_nr = pfn_to_section_nr ( pfn ) ;
if ( ! present_section_nr ( section_nr ) )
continue ;
if (pfn_to_nid(pfn) != nid)
	continue;
/*
 * Some memory sections of this node are not removed, so we
 * can't offline the node now.
 */
return ;
}
2013-09-12 01:21:50 +04:00
	if (check_and_unmap_cpu_on_node(pgdat))
2013-02-23 04:33:14 +04:00
		return;

	/*
	 * All memory/cpu of this node have been removed; we can offline
	 * the node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);

2013-02-23 04:33:16 +04:00
	if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
		/* node data is allocated from boot memory */
		return;

	/* free waittable in each zone */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

2013-03-23 02:04:50 +04:00
		/*
		 * wait_table may be allocated from boot memory, so only
		 * free it here if it was allocated by vmalloc.
		 */
		if (is_vmalloc_addr(zone->wait_table))
2013-02-23 04:33:16 +04:00
			vfree(zone->wait_table);
	}

	/*
	 * Since there is no way to guarantee that the address of pgdat/zone
	 * is not on the stack of any kernel thread or used by other kernel
	 * objects without reference counting or another synchronizing
	 * method, do not reset node_data and free pgdat here.  Just reset
	 * it to 0 and reuse the memory when the node is online again.
	 */
	memset(pgdat, 0, sizeof(*pgdat));
2013-02-23 04:33:14 +04:00
}
2013-02-23 04:33:27 +04:00
EXPORT_SYMBOL(try_offline_node);
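As the NOTE above requires, a caller must hold the device hotplug lock
around try_offline_node().  A minimal caller sketch, not code from this
file: lock_device_hotplug()/unlock_device_hotplug() are the serialization
helpers named in the NOTE, while the wrapper itself is illustrative only.

/* Hypothetical caller sketch; serialization per the NOTE above. */
static void example_offline_node(int nid)
{
	lock_device_hotplug();
	try_offline_node(nid);
	unlock_device_hotplug();
}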
2013-02-23 04:33:14 +04:00
2013-09-12 01:21:50 +04:00
/**
 * remove_memory
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
2013-05-27 14:58:46 +04:00
void __ref remove_memory(int nid, u64 start, u64 size)
2013-02-23 04:32:54 +04:00
{
2013-05-27 14:58:46 +04:00
	int ret;
memory-hotplug: try to offline the memory twice to avoid dependence

Memory can't always be offlined when CONFIG_MEMCG is selected.  For
example: suppose there is a memory device on node 1 covering the address
range [1G, 1.5G).  You will find 4 new directories, memory8, memory9,
memory10 and memory11, under /sys/devices/system/memory/.

If CONFIG_MEMCG is selected, we allocate memory to store page cgroups
when we online pages.  When we online memory8, the memory that stores
its page cgroups is not provided by this memory device.  But when we
online memory9, the memory that stores its page cgroups may be provided
by memory8, so we can't offline memory8 before memory9.  We should
offline the memory in the reverse order.

When the memory device is hot-removed, we will auto-offline the memory
provided by this memory device.  But we don't know which memory was
onlined first, so offlining may fail.  In that case, iterate twice to
offline the memory: in the 1st iteration, offline every non-primary
memory block; in the 2nd, offline the primary (i.e. first added) memory
block.

This idea was suggested by KOSAKI Motohiro.
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Jianguo Wu <wujianguo@huawei.com>
Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Wu Jianguo <wujianguo@huawei.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
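The two-pass idea above can be sketched roughly as follows.  This is a
minimal illustration, not code from this file: offline_memory_block()
and the "primary" flag are hypothetical stand-ins for whatever helper
offlines a single block and for the "first added" bookkeeping.

/*
 * Sketch only: offline_memory_block() and blk[i]->primary are
 * hypothetical.  Pass 0 offlines every non-primary block; pass 1
 * retries whatever is still online, including the primary block.
 */
static int offline_blocks_twice(struct memory_block **blk, int nr)
{
	int pass, i, ret;

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < nr; i++) {
			if (pass == 0 && blk[i]->primary)
				continue;	/* leave primary for pass 1 */
			if (is_memblock_offlined(blk[i]))
				continue;	/* done in an earlier pass */
			ret = offline_memory_block(blk[i]);
			if (ret && pass == 1)
				return ret;	/* busy even after 2 passes */
		}
	}
	return 0;
}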
2013-02-23 04:32:50 +04:00
2013-09-12 01:21:49 +04:00
	BUG_ON(check_hotplug_memory_range(start, size));
2013-02-23 04:32:52 +04:00
	lock_memory_hotplug();

	/*
2013-05-27 14:58:46 +04:00
	 * All memory blocks must be offlined before removing memory.  Check
	 * whether all memory blocks in question are offline and trigger a
	 * BUG() if this is not the case.
2013-02-23 04:32:52 +04:00
	 */
2013-05-27 14:58:46 +04:00
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
2013-11-13 03:07:20 +04:00
				check_memblock_offlined_cb);
2013-02-23 04:32:54 +04:00
	if (ret) {
		unlock_memory_hotplug();
2013-05-27 14:58:46 +04:00
		BUG();
2013-02-23 04:32:52 +04:00
	}

2013-02-23 04:32:56 +04:00
	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");

2013-02-23 04:32:58 +04:00
	arch_remove_memory(start, size);

2013-02-23 04:33:14 +04:00
	try_offline_node(nid);

2013-02-23 04:32:52 +04:00
	unlock_memory_hotplug();
2008-10-19 07:25:58 +04:00
}
EXPORT_SYMBOL_GPL(remove_memory);
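Putting the pieces together, a hypothetical hot-remove caller might look
like the sketch below.  The nid/start/size values are illustrative only;
the range must pass check_hotplug_memory_range(), and every memory block
in it must already be offline, or the walk above will BUG().

/* Illustrative values only; serialization per the NOTE above. */
static void example_remove_memory(void)
{
	int nid = 1;			/* example node */
	u64 start = 0x40000000ULL;	/* 1G, example address */
	u64 size = 512ULL << 20;	/* 512M, example size */

	lock_device_hotplug();
	remove_memory(nid, start, size);
	unlock_device_hotplug();
}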
2013-06-02 00:24:07 +04:00
#endif /* CONFIG_MEMORY_HOTREMOVE */