mm, hugetlb: protect reserved pages when soft offlining a hugepage
Don't use the reserve pool when soft offlining a hugepage. Check that there are free pages outside the reserve pool before dequeueing the huge page; otherwise we can steal another task's reserved page.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0f1cfe9d0d
commit 4ef9184804
@@ -955,10 +955,11 @@ static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
  */
 struct page *alloc_huge_page_node(struct hstate *h, int nid)
 {
-	struct page *page;
+	struct page *page = NULL;
 
 	spin_lock(&hugetlb_lock);
-	page = dequeue_huge_page_node(h, nid);
+	if (h->free_huge_pages - h->resv_huge_pages > 0)
+		page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
 	if (!page)
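For reference, this is roughly how alloc_huge_page_node() reads with the hunk applied. It is a sketch assembled from the hunk above and its context line; the alloc_buddy_huge_page() fallback and the final return are not visible in the hunk and are assumed from the surrounding function, so treat them as illustrative rather than a quote of the patched source.

struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * Only dequeue a pre-allocated hugepage if free pages remain
	 * beyond those promised to existing reservations; otherwise this
	 * path would consume a page another mapping is counting on.
	 */
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	/*
	 * Assumed fallback (from context, not shown in the hunk): allocate
	 * a fresh hugepage from the buddy allocator when the pool cannot
	 * safely supply one.
	 */
	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}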