mm: move folio_set_compound_order() to mm/internal.h
folio_set_compound_order() is moved to an mm-internal location so external
folio users cannot misuse this function. Change the name of the function
to folio_set_order() and use WARN_ON_ONCE() rather than BUG_ON. Also,
handle the case where a non-large folio is passed and add clarifying comments
to the function.
Link: https://lore.kernel.org/lkml/20221207223731.32784-1-sidhartha.kumar@oracle.com/T/
Link: https://lkml.kernel.org/r/20221215061757.223440-1-sidhartha.kumar@oracle.com
Fixes: 9fd330582b ("mm: add folio dtor and order setter functions")
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
Suggested-by: Muchun Song <songmuchun@bytedance.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Suggested-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
1301f93134
commit
04a42e72d7
@ -1019,22 +1019,6 @@ static inline void set_compound_order(struct page *page, unsigned int order)
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* folio_set_compound_order is generally passed a non-zero order to
|
||||
* initialize a large folio. However, hugetlb code abuses this by
|
||||
* passing in zero when 'dissolving' a large folio.
|
||||
*/
|
||||
static inline void folio_set_compound_order(struct folio *folio,
|
||||
unsigned int order)
|
||||
{
|
||||
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
folio->_folio_nr_pages = order ? 1U << order : 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
/* Returns the number of pages in this potentially compound page. */
|
||||
static inline unsigned long compound_nr(struct page *page)
|
||||
{
|
||||
|
@ -1492,7 +1492,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
|
||||
set_page_refcounted(p);
|
||||
}
|
||||
|
||||
folio_set_compound_order(folio, 0);
|
||||
folio_set_order(folio, 0);
|
||||
__folio_clear_head(folio);
|
||||
}
|
||||
|
||||
@ -1956,7 +1956,7 @@ static bool __prep_compound_gigantic_folio(struct folio *folio,
|
||||
__folio_clear_reserved(folio);
|
||||
__folio_set_head(folio);
|
||||
/* we rely on prep_new_hugetlb_folio to set the destructor */
|
||||
folio_set_compound_order(folio, order);
|
||||
folio_set_order(folio, order);
|
||||
for (i = 0; i < nr_pages; i++) {
|
||||
p = folio_page(folio, i);
|
||||
|
||||
@ -2020,7 +2020,7 @@ out_error:
|
||||
p = folio_page(folio, j);
|
||||
__ClearPageReserved(p);
|
||||
}
|
||||
folio_set_compound_order(folio, 0);
|
||||
folio_set_order(folio, 0);
|
||||
__folio_clear_head(folio);
|
||||
return false;
|
||||
}
|
||||
|
@ -378,6 +378,25 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align,
|
||||
int split_free_page(struct page *free_page,
|
||||
unsigned int order, unsigned long split_pfn_offset);
|
||||
|
||||
/*
|
||||
* This will have no effect, other than possibly generating a warning, if the
|
||||
* caller passes in a non-large folio.
|
||||
*/
|
||||
static inline void folio_set_order(struct folio *folio, unsigned int order)
|
||||
{
|
||||
if (WARN_ON_ONCE(!folio_test_large(folio)))
|
||||
return;
|
||||
|
||||
folio->_folio_order = order;
|
||||
#ifdef CONFIG_64BIT
|
||||
/*
|
||||
* When hugetlb dissolves a folio, we need to clear the tail
|
||||
* page, rather than setting nr_pages to 1.
|
||||
*/
|
||||
folio->_folio_nr_pages = order ? 1U << order : 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
#if defined CONFIG_COMPACTION || defined CONFIG_CMA
|
||||
|
||||
/*
|
||||
|
Loading…
Reference in New Issue
Block a user