mm: optimise put_pages_list()
Instead of calling put_page() one page at a time, pop pages off the list if their refcount was too high and pass the remainder to free_unref_page_list(). This should be a speed improvement, but I have no measurements to support that. Current callers do not care about performance, but I hope to add some which do.

Link: https://lkml.kernel.org/r/20211007192138.561673-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 988c69f1bc
parent 642929a2de

mm/swap.c (23 lines changed)
@@ -134,18 +134,27 @@ EXPORT_SYMBOL(__put_page);
 /**
  * put_pages_list() - release a list of pages
  * @pages: list of pages threaded on page->lru
  *
- * Release a list of pages which are strung together on page.lru.  Currently
- * used by read_cache_pages() and related error recovery code.
+ * Release a list of pages which are strung together on page.lru.
  */
 void put_pages_list(struct list_head *pages)
 {
-	while (!list_empty(pages)) {
-		struct page *victim;
+	struct page *page, *next;
 
-		victim = lru_to_page(pages);
-		list_del(&victim->lru);
-		put_page(victim);
+	list_for_each_entry_safe(page, next, pages, lru) {
+		if (!put_page_testzero(page)) {
+			list_del(&page->lru);
+			continue;
+		}
+		if (PageHead(page)) {
+			list_del(&page->lru);
+			__put_compound_page(page);
+			continue;
+		}
+		/* Cannot be PageLRU because it's passed to us using the lru */
+		__ClearPageWaiters(page);
 	}
+
+	free_unref_page_list(pages);
 }
 EXPORT_SYMBOL(put_pages_list);
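For context, a minimal sketch (not part of the patch) of how a caller might use this interface: pages are threaded onto a local list through page->lru and released with a single call. The function name example_release_batch() and the allocation loop are hypothetical; put_pages_list(), alloc_page(), LIST_HEAD() and list_add() are the existing kernel APIs referenced above.

/* Hypothetical caller sketch, not from this patch: collect pages on a
 * local list threaded through page->lru, then drop our reference on
 * all of them in one call.
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static void example_release_batch(void)
{
	LIST_HEAD(pages);
	int i;

	for (i = 0; i < 8; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			break;
		/* Thread the page onto the local list via page->lru. */
		list_add(&page->lru, &pages);
	}

	/* Drop our reference on every page on the list. */
	put_pages_list(&pages);
}

With this patch, pages whose refcount does not drop to zero are popped off the list, compound pages are routed through __put_compound_page(), and whatever remains on the list is freed in one batch by free_unref_page_list() instead of one put_page() call per page.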