mm: code cleanup for MADV_FREE
Some comments for MADV_FREE are revised and added to help people understand the
MADV_FREE code, especially the page flag, PG_swapbacked. This makes
page_is_file_cache() inconsistent with its comments, so the function is renamed
to page_is_file_lru() to make them consistent again. All these changes are put
in one patch as one logical change.

Suggested-by: David Hildenbrand <david@redhat.com>
Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: David Rientjes <rientjes@google.com>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@kernel.org>
Acked-by: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@surriel.com>
Link: http://lkml.kernel.org/r/20200317100342.2730705-1-ying.huang@intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9de4f22a60
parent 7a9547fd4e
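As background for the comments being revised, here is a minimal userspace
sketch of the MADV_FREE call itself (illustrative only and not part of this
patch; the mapping size and error handling are arbitrary):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;	/* 1 MiB of anonymous memory */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);	/* dirty the pages */

	/*
	 * Declare the contents disposable. The mapping stays valid; under
	 * memory pressure the kernel may drop these clean anonymous pages
	 * outright (no swap I/O). Writing to the range again cancels the
	 * lazy free.
	 */
	if (madvise(buf, len, MADV_FREE) != 0)
		perror("madvise");

	munmap(buf, len);
	return 0;
}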
include/linux/mm_inline.h
@@ -6,19 +6,20 @@
 #include <linux/swap.h>
 
 /**
- * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * page_is_file_lru - should the page be on a file LRU or anon LRU?
  * @page: the page to test
  *
- * Returns 1 if @page is page cache page backed by a regular filesystem,
- * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
- * Used by functions that manipulate the LRU lists, to sort a page
- * onto the right LRU list.
+ * Returns 1 if @page is a regular filesystem backed page cache page or a lazily
+ * freed anonymous page (e.g. via MADV_FREE). Returns 0 if @page is a normal
+ * anonymous page, a tmpfs page or otherwise ram or swap backed page. Used by
+ * functions that manipulate the LRU lists, to sort a page onto the right LRU
+ * list.
  *
  * We would like to get this info without a page flag, but the state
  * needs to survive until the page is last deleted from the LRU, which
  * could be as far down as __page_cache_release.
  */
-static inline int page_is_file_cache(struct page *page)
+static inline int page_is_file_lru(struct page *page)
 {
 	return !PageSwapBacked(page);
 }
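After the rename, a page on the file LRU is no longer necessarily a page cache
page. As a hypothetical illustration (this helper is not in the patch or the
kernel), the three cases the new comment describes can be told apart like this:

/* Hypothetical helper, illustration only: name the LRU class of a page. */
static const char *lru_kind(struct page *page)
{
	if (page_is_file_lru(page))	/* i.e. !PageSwapBacked(page) */
		return PageAnon(page) ? "lazily freed anon (MADV_FREE)"
				      : "file-backed page cache";
	return "normal anon / shmem / tmpfs";
}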
@@ -75,7 +76,7 @@ static __always_inline void del_page_from_lru_list(struct page *page,
  */
 static inline enum lru_list page_lru_base_type(struct page *page)
 {
-	if (page_is_file_cache(page))
+	if (page_is_file_lru(page))
 		return LRU_INACTIVE_FILE;
 	return LRU_INACTIVE_ANON;
 }
include/linux/page-flags.h
@@ -63,6 +63,11 @@
  * page_waitqueue(page) is a wait queue of all tasks waiting for the page
  * to become unlocked.
  *
+ * PG_swapbacked is set when a page uses swap as a backing storage. This are
+ * usually PageAnon or shmem pages but please note that even anonymous pages
+ * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
+ * a result of MADV_FREE).
+ *
  * PG_uptodate tells whether the page's contents is valid. When a read
  * completes, the page becomes uptodate, unless a disk I/O error happened.
  *
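That flag combination is what the rest of mm uses to detect a lazily freed
page. A sketch of the test (the helper name is an assumption for illustration;
the expression itself appears verbatim in the mm/vmscan.c hunks below):

/* Sketch only; the wrapper is hypothetical, the test is what mm uses. */
static inline bool page_is_lazyfree(struct page *page)
{
	/* anonymous, but no longer swap backed: dropped via MADV_FREE */
	return PageAnon(page) && !PageSwapBacked(page);
}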
include/trace/events/vmscan.h
@@ -323,7 +323,7 @@ TRACE_EVENT(mm_vmscan_writepage,
 	TP_fast_assign(
 		__entry->pfn = page_to_pfn(page);
 		__entry->reclaim_flags = trace_reclaim_flags(
-				page_is_file_cache(page));
+				page_is_file_lru(page));
 	),
 
 	TP_printk("page=%p pfn=%lu flags=%s",
mm/compaction.c
@@ -989,7 +989,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		/* Successfully isolated */
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 		mod_node_page_state(page_pgdat(page),
-				NR_ISOLATED_ANON + page_is_file_cache(page),
+				NR_ISOLATED_ANON + page_is_file_lru(page),
 				hpage_nr_pages(page));
 
 isolate_success:
mm/gup.c
@@ -1677,7 +1677,7 @@ check_again:
 			list_add_tail(&head->lru, &cma_page_list);
 			mod_node_page_state(page_pgdat(head),
 					    NR_ISOLATED_ANON +
-					    page_is_file_cache(head),
+					    page_is_file_lru(head),
 					    hpage_nr_pages(head));
 		}
 	}
mm/khugepaged.c
@@ -511,7 +511,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
+	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_lru(page));
 	unlock_page(page);
 	putback_lru_page(page);
 }
@@ -611,7 +611,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			goto out;
 		}
 		inc_node_page_state(page,
-				NR_ISOLATED_ANON + page_is_file_cache(page));
+				NR_ISOLATED_ANON + page_is_file_lru(page));
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 
mm/memory-failure.c
@@ -1810,7 +1810,7 @@ static int __soft_offline_page(struct page *page, int flags)
 		 */
 		if (!__PageMovable(page))
 			inc_node_page_state(page, NR_ISOLATED_ANON +
-						page_is_file_cache(page));
+						page_is_file_lru(page));
 		list_add(&page->lru, &pagelist);
 		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
mm/memory_hotplug.c
@@ -1317,7 +1317,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			list_add_tail(&page->lru, &source);
 			if (!__PageMovable(page))
 				inc_node_page_state(page, NR_ISOLATED_ANON +
-						    page_is_file_cache(page));
+						    page_is_file_lru(page));
 
 		} else {
 			pr_warn("failed to isolate pfn %lx\n", pfn);
mm/mempolicy.c
@@ -1022,7 +1022,7 @@ static int migrate_page_add(struct page *page, struct list_head *pagelist,
 		if (!isolate_lru_page(head)) {
 			list_add_tail(&head->lru, pagelist);
 			mod_node_page_state(page_pgdat(head),
-				NR_ISOLATED_ANON + page_is_file_cache(head),
+				NR_ISOLATED_ANON + page_is_file_lru(head),
 				hpage_nr_pages(head));
 		} else if (flags & MPOL_MF_STRICT) {
 			/*
mm/migrate.c
@@ -193,7 +193,7 @@ void putback_movable_pages(struct list_head *l)
 			put_page(page);
 		} else {
 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_cache(page), -hpage_nr_pages(page));
+					page_is_file_lru(page), -hpage_nr_pages(page));
 			putback_lru_page(page);
 		}
 	}
@@ -1219,7 +1219,7 @@ out:
 		 */
 		if (likely(!__PageMovable(page)))
 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_cache(page), -hpage_nr_pages(page));
+					page_is_file_lru(page), -hpage_nr_pages(page));
 	}
 
 	/*
@@ -1592,7 +1592,7 @@ static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
 		err = 1;
 		list_add_tail(&head->lru, pagelist);
 		mod_node_page_state(page_pgdat(head),
-			NR_ISOLATED_ANON + page_is_file_cache(head),
+			NR_ISOLATED_ANON + page_is_file_lru(head),
 			hpage_nr_pages(head));
 	}
 out_putpage:
@@ -1955,7 +1955,7 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 		return 0;
 	}
 
-	page_lru = page_is_file_cache(page);
+	page_lru = page_is_file_lru(page);
 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
 				hpage_nr_pages(page));
 
@@ -1991,7 +1991,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	 * Don't migrate file pages that are mapped in multiple processes
 	 * with execute permissions as they are probably shared libraries.
 	 */
-	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
 	    (vma->vm_flags & VM_EXEC))
 		goto out;
 
@@ -1999,7 +1999,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	 * Also do not migrate dirty pages as not all filesystems can move
 	 * dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
 	 */
-	if (page_is_file_cache(page) && PageDirty(page))
+	if (page_is_file_lru(page) && PageDirty(page))
 		goto out;
 
 	isolated = numamigrate_isolate_page(pgdat, page);
@@ -2014,7 +2014,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	if (!list_empty(&migratepages)) {
 		list_del(&page->lru);
 		dec_node_page_state(page, NR_ISOLATED_ANON +
-				page_is_file_cache(page));
+				page_is_file_lru(page));
 		putback_lru_page(page);
 	}
 	isolated = 0;
@@ -2044,7 +2044,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated = 0;
 	struct page *new_page = NULL;
-	int page_lru = page_is_file_cache(page);
+	int page_lru = page_is_file_lru(page);
 	unsigned long start = address & HPAGE_PMD_MASK;
 
 	new_page = alloc_pages_node(node,
mm/mprotect.c
@@ -98,7 +98,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				 * it cannot move them all from MIGRATE_ASYNC
 				 * context.
 				 */
-				if (page_is_file_cache(page) && PageDirty(page))
+				if (page_is_file_lru(page) && PageDirty(page))
 					continue;
 
 				/*
mm/swap.c
@@ -276,7 +276,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
+		int file = page_is_file_lru(page);
 		int lru = page_lru_base_type(page);
 
 		del_page_from_lru_list(page, lruvec, lru);
@@ -394,7 +394,7 @@ void mark_page_accessed(struct page *page)
 		else
 			__lru_cache_activate_page(page);
 		ClearPageReferenced(page);
-		if (page_is_file_cache(page))
+		if (page_is_file_lru(page))
 			workingset_activation(page);
 	}
 	if (page_is_idle(page))
@@ -515,7 +515,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 		return;
 
 	active = PageActive(page);
-	file = page_is_file_cache(page);
+	file = page_is_file_lru(page);
 	lru = page_lru_base_type(page);
 
 	del_page_from_lru_list(page, lruvec, lru + active);
@@ -548,7 +548,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 			    void *arg)
 {
 	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
-		int file = page_is_file_cache(page);
+		int file = page_is_file_lru(page);
 		int lru = page_lru_base_type(page);
 
 		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
@@ -573,9 +573,9 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 		ClearPageActive(page);
 		ClearPageReferenced(page);
 		/*
-		 * lazyfree pages are clean anonymous pages. They have
-		 * SwapBacked flag cleared to distinguish normal anonymous
-		 * pages
+		 * Lazyfree pages are clean anonymous pages. They have
+		 * PG_swapbacked flag cleared, to distinguish them from normal
+		 * anonymous pages
 		 */
 		ClearPageSwapBacked(page);
 		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);
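Note how this hunk ties the pieces together: ClearPageSwapBacked() is what
makes a lazily freed page report page_is_file_lru() == 1 from then on, which
is why the very next line can place it on LRU_INACTIVE_FILE even though it is
still an anonymous page.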
@@ -962,7 +962,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 
 	if (page_evictable(page)) {
 		lru = page_lru(page);
-		update_page_reclaim_stat(lruvec, page_is_file_cache(page),
+		update_page_reclaim_stat(lruvec, page_is_file_lru(page),
 					 PageActive(page));
 		if (was_unevictable)
 			count_vm_event(UNEVICTABLE_PGRESCUED);
mm/vmscan.c
@@ -919,7 +919,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		 * exceptional entries and shadow exceptional entries in the
 		 * same address_space.
 		 */
-		if (reclaimed && page_is_file_cache(page) &&
+		if (reclaimed && page_is_file_lru(page) &&
 		    !mapping_exiting(mapping) && !dax_mapping(mapping))
 			shadow = workingset_eviction(page, target_memcg);
 		__delete_from_page_cache(page, shadow);
@@ -1043,7 +1043,7 @@ static void page_check_dirty_writeback(struct page *page,
 	 * Anonymous pages are not handled by flushers and must be written
 	 * from reclaim context. Do not stall reclaim based on them
 	 */
-	if (!page_is_file_cache(page) ||
+	if (!page_is_file_lru(page) ||
 	    (PageAnon(page) && !PageSwapBacked(page))) {
 		*dirty = false;
 		*writeback = false;
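(With the rename, the second clause is what still exempts lazily freed pages:
they pass page_is_file_lru(), but PageAnon() && !PageSwapBacked() marks them
as clean anonymous memory with no dirty or writeback state to wait for.)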
@@ -1315,7 +1315,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 				 * the rest of the LRU for clean pages and see
 				 * the same dirty pages again (PageReclaim).
 				 */
-				if (page_is_file_cache(page) &&
+				if (page_is_file_lru(page) &&
 				    (!current_is_kswapd() || !PageReclaim(page) ||
 				     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 					/*
@@ -1459,7 +1459,7 @@ activate_locked:
 			try_to_free_swap(page);
 			VM_BUG_ON_PAGE(PageActive(page), page);
 			if (!PageMlocked(page)) {
-				int type = page_is_file_cache(page);
+				int type = page_is_file_lru(page);
 				SetPageActive(page);
 				stat->nr_activate[type] += nr_pages;
 				count_memcg_page_event(page, PGACTIVATE);
@@ -1497,7 +1497,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 	LIST_HEAD(clean_pages);
 
 	list_for_each_entry_safe(page, next, page_list, lru) {
-		if (page_is_file_cache(page) && !PageDirty(page) &&
+		if (page_is_file_lru(page) && !PageDirty(page) &&
 		    !__PageMovable(page) && !PageUnevictable(page)) {
 			ClearPageActive(page);
 			list_move(&page->lru, &clean_pages);
@@ -2053,7 +2053,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			 * IO, plus JVM can create lots of anon VM_EXEC pages,
 			 * so we ignore them here.
 			 */
-			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
+			if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
				list_add(&page->lru, &l_active);
 				continue;
 			}