hugetlb: convert remove_pool_huge_page() to remove_pool_hugetlb_folio()
Convert the callers to expect a folio and remove the unnecessary
conversion back to a struct page.

Link: https://lkml.kernel.org/r/20230824141325.2704553-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 04bbfd844b
commit d5b43e9683

mm/hugetlb.c | 29
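The commit message's claim that the conversion back to struct page was
unnecessary rests on how struct folio is laid out relative to struct page.
Below is a minimal, self-contained userspace model of that layout
guarantee; the struct definitions are hypothetical stand-ins, not the
kernel's real ones. For a head page, the folio's lru list head and the
page's lru list head are the same storage, so a caller linking folio->lru
onto page_list gets exactly the node that the old &folio->page round trip
would have given it.

#include <assert.h>

/* Hypothetical stand-ins modeling the kernel's layout rule that a
 * struct folio begins with (a union over) its head struct page. */
struct list_head { struct list_head *next, *prev; };

struct page {
        unsigned long flags;
        struct list_head lru;
};

struct folio {
        union {
                struct {
                        unsigned long flags;
                        struct list_head lru;
                };
                struct page page;
        };
};

int main(void)
{
        struct folio f;

        /* The old round trip (folio -> &folio->page -> page->lru)
         * addressed the same storage as folio->lru does directly. */
        assert((void *)&f.page.lru == (void *)&f.lru);
        return 0;
}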
@@ -1446,7 +1446,7 @@ static int hstate_next_node_to_alloc(struct hstate *h,
 }
 
 /*
- * helper for remove_pool_huge_page() - return the previously saved
+ * helper for remove_pool_hugetlb_folio() - return the previously saved
  * node ["this node"] from which to free a huge page. Advance the
  * next node id whether or not we find a free huge page to free so
  * that the next attempt to free addresses the next node.
@@ -2222,9 +2222,8 @@ static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  * an additional call to free the page to low level allocators.
  * Called with hugetlb_lock locked.
  */
-static struct page *remove_pool_huge_page(struct hstate *h,
-                                          nodemask_t *nodes_allowed,
-                                          bool acct_surplus)
+static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
+                nodemask_t *nodes_allowed, bool acct_surplus)
 {
         int nr_nodes, node;
         struct folio *folio = NULL;
@@ -2244,7 +2243,7 @@ static struct page *remove_pool_huge_page(struct hstate *h,
                 }
         }
 
-        return &folio->page;
+        return folio;
 }
 
 /*
@@ -2598,7 +2597,6 @@ static void return_unused_surplus_pages(struct hstate *h,
                                         unsigned long unused_resv_pages)
 {
         unsigned long nr_pages;
-        struct page *page;
         LIST_HEAD(page_list);
 
         lockdep_assert_held(&hugetlb_lock);
@@ -2619,15 +2617,17 @@ static void return_unused_surplus_pages(struct hstate *h,
          * evenly across all nodes with memory. Iterate across these nodes
          * until we can no longer free unreserved surplus pages. This occurs
          * when the nodes with surplus pages have no free pages.
-         * remove_pool_huge_page() will balance the freed pages across the
+         * remove_pool_hugetlb_folio() will balance the freed pages across the
          * on-line nodes with memory and will handle the hstate accounting.
          */
         while (nr_pages--) {
-                page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1);
-                if (!page)
+                struct folio *folio;
+
+                folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
+                if (!folio)
                         goto out;
 
-                list_add(&page->lru, &page_list);
+                list_add(&folio->lru, &page_list);
         }
 
 out:
@@ -3472,7 +3472,6 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
                               nodemask_t *nodes_allowed)
 {
         unsigned long min_count, ret;
-        struct page *page;
         LIST_HEAD(page_list);
         NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
 
@@ -3594,11 +3593,13 @@ static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
          * Collect pages to be removed on list without dropping lock
          */
         while (min_count < persistent_huge_pages(h)) {
-                page = remove_pool_huge_page(h, nodes_allowed, 0);
-                if (!page)
+                struct folio *folio;
+
+                folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
+                if (!folio)
                         break;
 
-                list_add(&page->lru, &page_list);
+                list_add(&folio->lru, &page_list);
         }
         /* free the pages after dropping lock */
         spin_unlock_irq(&hugetlb_lock);
 
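For context beyond the hunks shown: both converted call sites use the
returned folio only as a list node. They batch folios on page_list while
holding hugetlb_lock, then free them after dropping the lock, since
freeing back to the page allocator is too heavy to do under the spinlock
(the diff's own comment notes "free the pages after dropping lock").
A condensed sketch of that consume side as this commit leaves it;
update_and_free_pages_bulk() is named here as an assumption from the
surrounding mm/hugetlb.c of this era, not from this diff:

        spin_lock_irq(&hugetlb_lock);
        while (nr_pages--) {
                struct folio *folio;

                folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
                if (!folio)
                        break;
                list_add(&folio->lru, &page_list);
        }
        spin_unlock_irq(&hugetlb_lock);
        /* Assumed helper: frees everything queued on page_list. */
        update_and_free_pages_bulk(h, &page_list);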