filemap: Add folio_put_wait_locked()
Convert all three callers of put_and_wait_on_page_locked() to
folio_put_wait_locked(). This shrinks the kernel overall by 19 bytes.
filemap_update_page() shrinks by 19 bytes while __migration_entry_wait()
is unchanged. folio_put_wait_locked() is 14 bytes smaller than
put_and_wait_on_page_locked(), but pmd_migration_entry_wait() grows by
14 bytes. It removes the assumption from pmd_migration_entry_wait()
that pages cannot be larger than a PMD (which is true today, but may
be interesting to explore in the future).

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
commit 9f2b04a25a
parent 5bf34d7c7f
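The conversion pattern is the same at every call site: take a folio
reference with folio_try_get() instead of get_page_unless_zero(), then
drop that reference and sleep on PG_locked with folio_put_wait_locked()
instead of put_and_wait_on_page_locked(). The sketch below is not part
of the diff; wait_for_migrating_page() is a hypothetical helper used
only for illustration (kernel-internal context assumed, headers
omitted). It shows the old and new shape of such a caller, starting
from a struct page as the migration paths below do:

	/* Before: operates on the raw struct page */
	static void wait_for_migrating_page_old(struct page *page)
	{
		if (!get_page_unless_zero(page))
			return;		/* raced with free; caller just retries */
		/* Drops the page reference before sleeping on PG_locked. */
		put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
	}

	/* After: folio-based; page_folio() maps a tail page to its folio */
	static void wait_for_migrating_page_new(struct page *page)
	{
		struct folio *folio = page_folio(page);

		if (!folio_try_get(folio))
			return;		/* raced with free; caller just retries */
		/* Drops the folio reference before sleeping on PG_locked. */
		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
	}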
include/linux/pagemap.h

@@ -868,7 +868,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 	return folio_wait_locked_killable(page_folio(page));
 }
 
-int put_and_wait_on_page_locked(struct page *page, int state);
+int folio_put_wait_locked(struct folio *folio, int state);
 void wait_on_page_writeback(struct page *page);
 void folio_wait_writeback(struct folio *folio);
 int folio_wait_writeback_killable(struct folio *folio);
mm/filemap.c (27 changed lines)
@@ -1259,10 +1259,10 @@ enum behavior {
 			 * __folio_lock() waiting on then setting PG_locked.
 			 */
 	SHARED,		/* Hold ref to page and check the bit when woken, like
-			 * wait_on_page_writeback() waiting on PG_writeback.
+			 * folio_wait_writeback() waiting on PG_writeback.
 			 */
 	DROP,		/* Drop ref to page before wait, no check when woken,
-			 * like put_and_wait_on_page_locked() on PG_locked.
+			 * like folio_put_wait_locked() on PG_locked.
 			 */
 };
 
@@ -1439,22 +1439,21 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr)
 EXPORT_SYMBOL(folio_wait_bit_killable);
 
 /**
- * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
- * @page: The page to wait for.
+ * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
+ * @folio: The folio to wait for.
  * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
  *
- * The caller should hold a reference on @page. They expect the page to
+ * The caller should hold a reference on @folio. They expect the page to
  * become unlocked relatively soon, but do not wish to hold up migration
- * (for example) by holding the reference while waiting for the page to
+ * (for example) by holding the reference while waiting for the folio to
  * come unlocked. After this function returns, the caller should not
- * dereference @page.
+ * dereference @folio.
  *
- * Return: 0 if the page was unlocked or -EINTR if interrupted by a signal.
+ * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
  */
-int put_and_wait_on_page_locked(struct page *page, int state)
+int folio_put_wait_locked(struct folio *folio, int state)
 {
-	return folio_wait_bit_common(page_folio(page), PG_locked, state,
-			DROP);
+	return folio_wait_bit_common(folio, PG_locked, state, DROP);
 }
 
 /**
@@ -2447,7 +2446,11 @@ static int filemap_update_page(struct kiocb *iocb,
 			goto unlock_mapping;
 		if (!(iocb->ki_flags & IOCB_WAITQ)) {
 			filemap_invalidate_unlock_shared(mapping);
-			put_and_wait_on_page_locked(&folio->page, TASK_KILLABLE);
+			/*
+			 * This is where we usually end up waiting for a
+			 * previously submitted readahead to finish.
+			 */
+			folio_put_wait_locked(folio, TASK_KILLABLE);
 			return AOP_TRUNCATED_PAGE;
 		}
 		error = __folio_lock_async(folio, iocb->ki_waitq);
mm/migrate.c (21 changed lines)
@@ -291,7 +291,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 {
 	pte_t pte;
 	swp_entry_t entry;
-	struct page *page;
+	struct folio *folio;
 
 	spin_lock(ptl);
 	pte = *ptep;
@@ -302,18 +302,17 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 	if (!is_migration_entry(entry))
 		goto out;
 
-	page = pfn_swap_entry_to_page(entry);
-	page = compound_head(page);
+	folio = page_folio(pfn_swap_entry_to_page(entry));
 
 	/*
 	 * Once page cache replacement of page migration started, page_count
-	 * is zero; but we must not call put_and_wait_on_page_locked() without
-	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
+	 * is zero; but we must not call folio_put_wait_locked() without
+	 * a ref. Use folio_try_get(), and just fault again if it fails.
 	 */
-	if (!get_page_unless_zero(page))
+	if (!folio_try_get(folio))
 		goto out;
 	pte_unmap_unlock(ptep, ptl);
-	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+	folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
 	return;
 out:
 	pte_unmap_unlock(ptep, ptl);
@@ -338,16 +337,16 @@ void migration_entry_wait_huge(struct vm_area_struct *vma,
 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
 {
 	spinlock_t *ptl;
-	struct page *page;
+	struct folio *folio;
 
 	ptl = pmd_lock(mm, pmd);
 	if (!is_pmd_migration_entry(*pmd))
 		goto unlock;
-	page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
-	if (!get_page_unless_zero(page))
+	folio = page_folio(pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd)));
+	if (!folio_try_get(folio))
 		goto unlock;
 	spin_unlock(ptl);
-	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
+	folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
 	return;
 unlock:
 	spin_unlock(ptl);