mm: convert free_transhuge_page() to folio_undo_large_rmappable()
Indirect calls are expensive, thanks to Spectre.  Test for
TRANSHUGE_PAGE_DTOR and destroy the folio appropriately.  Move the
free_compound_page() call into destroy_large_folio() to simplify later
patches.

Link: https://lkml.kernel.org/r/20230816151201.3655946-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Yanteng Si <siyanteng@loongson.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 8dc4a8f1e0
parent 454a00c40a
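The Spectre remark is the heart of the change: with retpoline mitigations
enabled, every call through the compound_page_dtors[] function-pointer table
pays an indirect-branch penalty, whereas testing the dtor value and calling
the function by name compiles to a compare plus a direct call.  A minimal
userspace sketch of that trade-off, assuming nothing beyond standard C (all
demo_* names are illustrative, not kernel API):

#include <stdio.h>

enum demo_dtor_id { DEMO_COMPOUND_DTOR, DEMO_TRANSHUGE_DTOR, DEMO_NR_DTORS };

static void demo_free_compound(int id)  { printf("compound %d freed\n", id); }
static void demo_free_transhuge(int id) { printf("transhuge %d freed\n", id); }

/* Before: dispatch through a function-pointer table.  Under Spectre
 * mitigations (retpolines), each call through here is penalized. */
static void (* const demo_dtors[DEMO_NR_DTORS])(int) = {
	[DEMO_COMPOUND_DTOR]  = demo_free_compound,
	[DEMO_TRANSHUGE_DTOR] = demo_free_transhuge,
};

static void demo_destroy_indirect(enum demo_dtor_id dtor, int id)
{
	demo_dtors[dtor](id);			/* indirect call */
}

/* After: test the ID and call directly, mirroring what
 * destroy_large_folio() now does for TRANSHUGE_PAGE_DTOR. */
static void demo_destroy_direct(enum demo_dtor_id dtor, int id)
{
	if (dtor == DEMO_TRANSHUGE_DTOR) {
		demo_free_transhuge(id);	/* direct call: no retpoline */
		return;
	}
	demo_dtors[dtor](id);			/* everything else as before */
}

int main(void)
{
	demo_destroy_indirect(DEMO_TRANSHUGE_DTOR, 1);
	demo_destroy_direct(DEMO_TRANSHUGE_DTOR, 2);
	return 0;
}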
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
@@ -141,8 +141,6 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags);
 
 void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
 bool can_split_folio(struct folio *folio, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
diff --git a/include/linux/mm.h b/include/linux/mm.h
@@ -1253,9 +1253,7 @@ enum compound_dtor_id {
 #ifdef CONFIG_HUGETLB_PAGE
 	HUGETLB_PAGE_DTOR,
 #endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	TRANSHUGE_PAGE_DTOR,
-#endif
 	NR_COMPOUND_DTORS,
 };
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
@@ -2776,10 +2776,9 @@ out:
 	return ret;
 }
 
-void free_transhuge_page(struct page *page)
+void folio_undo_large_rmappable(struct folio *folio)
 {
-	struct folio *folio = (struct folio *)page;
-	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
+	struct deferred_split *ds_queue;
 	unsigned long flags;
 
 	/*
@@ -2787,15 +2786,16 @@ void free_transhuge_page(struct page *page)
 	 * deferred_list. If folio is not in deferred_list, it's safe
 	 * to check without acquiring the split_queue_lock.
 	 */
-	if (data_race(!list_empty(&folio->_deferred_list))) {
-		spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
-		if (!list_empty(&folio->_deferred_list)) {
-			ds_queue->split_queue_len--;
-			list_del(&folio->_deferred_list);
-		}
-		spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
-	}
-	free_compound_page(page);
+	if (data_race(list_empty(&folio->_deferred_list)))
+		return;
+
+	ds_queue = get_deferred_split_queue(folio);
+	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
+	if (!list_empty(&folio->_deferred_list)) {
+		ds_queue->split_queue_len--;
+		list_del(&folio->_deferred_list);
+	}
+	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
 
 void deferred_split_folio(struct folio *folio)
diff --git a/mm/internal.h b/mm/internal.h
@@ -413,6 +413,8 @@ static inline void folio_set_order(struct folio *folio, unsigned int order)
 #endif
 }
 
+void folio_undo_large_rmappable(struct folio *folio);
+
 static inline void prep_compound_head(struct page *page, unsigned int order)
 {
 	struct folio *folio = (struct folio *)page;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
@@ -287,9 +287,6 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 	[NULL_COMPOUND_DTOR] = NULL,
 	[COMPOUND_PAGE_DTOR] = free_compound_page,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
-#endif
 };
 
 int min_free_kbytes = 1024;
@@ -614,6 +611,12 @@ void destroy_large_folio(struct folio *folio)
 		return;
 	}
 
+	if (folio_test_transhuge(folio) && dtor == TRANSHUGE_PAGE_DTOR) {
+		folio_undo_large_rmappable(folio);
+		free_compound_page(&folio->page);
+		return;
+	}
+
 	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
 	compound_page_dtors[dtor](&folio->page);
 }
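One non-obvious piece of the new folio_undo_large_rmappable() above is its
locking shape: an unlocked peek at the deferred list (annotated with
data_race()), an early return when the list is empty, and only then the
split_queue_lock plus a recheck before unlinking.  A hedged pthreads sketch
of that check/lock/recheck pattern, with hypothetical node_* names, not
kernel code:

#include <pthread.h>
#include <stdbool.h>

struct node {
	struct node *next;
	bool queued;			/* plays the role of !list_empty() */
};

static struct node *demo_head;
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_unqueue(struct node *n)
{
	/* Fast path: racy unlocked read.  It may be stale, so it is only
	 * a hint; skipping the lock is safe here because (as with a
	 * folio being freed) no one can be concurrently queueing n. */
	if (!n->queued)
		return;

	pthread_mutex_lock(&demo_lock);
	/* Slow path: recheck under the lock, since another thread may
	 * have unqueued n between the peek and the lock acquisition. */
	if (n->queued) {
		struct node **pp = &demo_head;

		while (*pp != n)
			pp = &(*pp)->next;
		*pp = n->next;
		n->queued = false;
	}
	pthread_mutex_unlock(&demo_lock);
}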