mm/vmscan: convert isolate_lru_pages() to use a folio
Remove a few hidden calls to compound_head, saving 279 bytes of text.

Link: https://lkml.kernel.org/r/20220617154248.700416-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 166e3d3227
parent b8cecb9376
committed by akpm

mm/vmscan.c (66 lines changed)
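The "hidden calls to compound_head" come from the page-flag test macros: a page-based test such as PageLRU() must first resolve a possible tail page to its head page before it can read the flags, while a folio is by definition never a tail page, so folio_test_lru() can read the flags directly. Below is a minimal sketch of the difference; the function names are illustrative stand-ins, not the real definitions, which are generated by the PAGEFLAG/FOLIO_TEST_FLAG machinery in include/linux/page-flags.h:

/* Simplified stand-ins -- not the real kernel definitions. */
static inline int sketch_page_lru(struct page *page)
{
	/* Every page-based flag test pays for this head-page lookup. */
	page = compound_head(page);
	return test_bit(PG_lru, &page->flags);
}

static inline bool sketch_folio_lru(struct folio *folio)
{
	/*
	 * A folio is never a tail page, so the compound_head()
	 * call (and its generated code) disappears entirely.
	 */
	return test_bit(PG_lru, &folio->flags);
}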
@@ -160,17 +160,17 @@ struct scan_control {
 };
 
 #ifdef ARCH_HAS_PREFETCHW
-#define prefetchw_prev_lru_page(_page, _base, _field)			\
+#define prefetchw_prev_lru_folio(_folio, _base, _field)			\
 	do {								\
-		if ((_page)->lru.prev != _base) {			\
-			struct page *prev;				\
+		if ((_folio)->lru.prev != _base) {			\
+			struct folio *prev;				\
 									\
-			prev = lru_to_page(&(_page->lru));		\
+			prev = lru_to_folio(&(_folio->lru));		\
 			prefetchw(&prev->_field);			\
 		}							\
 	} while (0)
 #else
-#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
+#define prefetchw_prev_lru_folio(_folio, _base, _field) do { } while (0)
 #endif
 
 /*
@@ -2139,72 +2139,72 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
 	unsigned long skipped = 0;
 	unsigned long scan, total_scan, nr_pages;
-	LIST_HEAD(pages_skipped);
+	LIST_HEAD(folios_skipped);
 
 	total_scan = 0;
 	scan = 0;
 	while (scan < nr_to_scan && !list_empty(src)) {
 		struct list_head *move_to = src;
-		struct page *page;
+		struct folio *folio;
 
-		page = lru_to_page(src);
-		prefetchw_prev_lru_page(page, src, flags);
+		folio = lru_to_folio(src);
+		prefetchw_prev_lru_folio(folio, src, flags);
 
-		nr_pages = compound_nr(page);
+		nr_pages = folio_nr_pages(folio);
 		total_scan += nr_pages;
 
-		if (page_zonenum(page) > sc->reclaim_idx) {
-			nr_skipped[page_zonenum(page)] += nr_pages;
-			move_to = &pages_skipped;
+		if (folio_zonenum(folio) > sc->reclaim_idx) {
+			nr_skipped[folio_zonenum(folio)] += nr_pages;
+			move_to = &folios_skipped;
 			goto move;
 		}
 
 		/*
-		 * Do not count skipped pages because that makes the function
-		 * return with no isolated pages if the LRU mostly contains
-		 * ineligible pages. This causes the VM to not reclaim any
-		 * pages, triggering a premature OOM.
-		 * Account all tail pages of THP.
+		 * Do not count skipped folios because that makes the function
+		 * return with no isolated folios if the LRU mostly contains
+		 * ineligible folios. This causes the VM to not reclaim any
+		 * folios, triggering a premature OOM.
+		 * Account all pages in a folio.
 		 */
 		scan += nr_pages;
 
-		if (!PageLRU(page))
+		if (!folio_test_lru(folio))
 			goto move;
-		if (!sc->may_unmap && page_mapped(page))
+		if (!sc->may_unmap && folio_mapped(folio))
 			goto move;
 
 		/*
-		 * Be careful not to clear PageLRU until after we're
-		 * sure the page is not being freed elsewhere -- the
-		 * page release code relies on it.
+		 * Be careful not to clear the lru flag until after we're
+		 * sure the folio is not being freed elsewhere -- the
+		 * folio release code relies on it.
 		 */
-		if (unlikely(!get_page_unless_zero(page)))
+		if (unlikely(!folio_try_get(folio)))
 			goto move;
 
-		if (!TestClearPageLRU(page)) {
-			/* Another thread is already isolating this page */
-			put_page(page);
+		if (!folio_test_clear_lru(folio)) {
+			/* Another thread is already isolating this folio */
+			folio_put(folio);
 			goto move;
 		}
 
 		nr_taken += nr_pages;
-		nr_zone_taken[page_zonenum(page)] += nr_pages;
+		nr_zone_taken[folio_zonenum(folio)] += nr_pages;
 		move_to = dst;
 move:
-		list_move(&page->lru, move_to);
+		list_move(&folio->lru, move_to);
 	}
 
 	/*
-	 * Splice any skipped pages to the start of the LRU list. Note that
+	 * Splice any skipped folios to the start of the LRU list. Note that
 	 * this disrupts the LRU order when reclaiming for lower zones but
 	 * we cannot splice to the tail. If we did then the SWAP_CLUSTER_MAX
-	 * scanning would soon rescan the same pages to skip and waste lots
+	 * scanning would soon rescan the same folios to skip and waste lots
 	 * of cpu cycles.
 	 */
-	if (!list_empty(&pages_skipped)) {
+	if (!list_empty(&folios_skipped)) {
 		int zid;
 
-		list_splice(&pages_skipped, src);
+		list_splice(&folios_skipped, src);
 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
 			if (!nr_skipped[zid])
 				continue;
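For readers following the loop above, the race handling it converts can be condensed as below. This is an illustrative sketch of the isolation protocol, using only identifiers that appear in the hunk, not a drop-in replacement for the function:

/*
 * Sketch of the per-folio isolation step. The reference must be
 * taken before the lru flag is cleared: a folio whose refcount has
 * already dropped to zero may be on its way back to the allocator,
 * and the release path relies on seeing the lru flag intact.
 */
if (unlikely(!folio_try_get(folio)))	/* may be mid-free: leave it alone */
	goto move;
if (!folio_test_clear_lru(folio)) {	/* lost the race to another isolator */
	folio_put(folio);		/* drop the reference we just took */
	goto move;
}
/* We now own the isolation; account the folio and move it to dst. */
nr_taken += nr_pages;
move_to = dst;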