vfs: keep inodes with page cache off the inode shrinker LRU
Historically (pre-2.5), the inode shrinker used to reclaim only empty inodes and skip over those that still contained page cache. This caused problems on highmem hosts: struct inode could fill up lowmem zones before the cache was getting reclaimed in the highmem zones.

To address this, the inode shrinker started to strip page cache to facilitate reclaiming lowmem. However, this comes with its own set of problems: the shrinkers may drop actively used page cache just because the inodes are not currently open or dirty - think working with a large git tree. It further doesn't respect cgroup memory protection settings and can cause priority inversions between containers.

Nowadays, the page cache also holds non-resident info for evicted cache pages in order to detect refaults. We've come to rely heavily on this data inside reclaim for protecting the cache workingset and driving swap behavior. We also use it to quantify and report workload health through psi. The latter in turn is used for fleet health monitoring, as well as for driving automated memory sizing of workloads and containers, proactive reclaim, and memory offloading schemes.

The consequence of dropping page cache prematurely is that we're seeing subtle and not-so-subtle failures in all of the above-mentioned scenarios, with the workload generally entering unexpected thrashing states while losing the ability to reliably detect it.

To fix this on non-highmem systems at least, going back to rotating inodes on the LRU isn't feasible. We've tried (commit a76cf1a474 ("mm: don't reclaim inodes with many attached pages")) and failed (commit 69056ee6a8 ("Revert "mm: don't reclaim inodes with many attached pages"")). The issue is mostly that shrinker pools attract pressure based on their size, and when objects get skipped the shrinkers remember this as deferred reclaim work. This accumulates excessive pressure on the remaining inodes, and we can quickly eat into heavily used ones, or dirty ones that require IO to reclaim, while there is potentially still plenty of cold, clean cache around.

Instead, this patch keeps populated inodes off the inode LRU in the first place - just like an open file or dirty state would. An otherwise clean and unused inode then gets queued when the last cache entry disappears. This solves the problem without reintroducing the reclaim issues, and is generally a bit more scalable than having to wade through potentially hundreds of thousands of busy inodes.

Locking is a bit tricky because the locks protecting the inode state (i_lock) and the inode LRU (lru_list.lock) don't nest inside the irq-safe page cache lock (i_pages.xa_lock). Page cache deletions are serialized through i_lock, taken before the i_pages lock, to make sure depopulated inodes are queued reliably. Additions may race with deletions, but we'll check again in the shrinker. If additions race with the shrinker itself, we're protected by the i_lock: if find_inode() or iput() win, the shrinker will bail on the elevated i_count or I_REFERENCED; if the shrinker wins and goes ahead with the inode, it will set I_FREEING and inhibit further igets(), which will cause the other side to create a new instance of the inode instead.

Link: https://lkml.kernel.org/r/20210614211904.14420-4-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Roman Gushchin <guro@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 51b8c1fe25
parent 658f9ae761
fs/inode.c

@@ -428,11 +428,20 @@ void ihold(struct inode *inode)
 }
 EXPORT_SYMBOL(ihold);
 
-static void inode_lru_list_add(struct inode *inode)
+static void __inode_add_lru(struct inode *inode, bool rotate)
 {
+	if (inode->i_state & (I_DIRTY_ALL | I_SYNC | I_FREEING | I_WILL_FREE))
+		return;
+	if (atomic_read(&inode->i_count))
+		return;
+	if (!(inode->i_sb->s_flags & SB_ACTIVE))
+		return;
+	if (!mapping_shrinkable(&inode->i_data))
+		return;
+
 	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
 		this_cpu_inc(nr_unused);
-	else
+	else if (rotate)
 		inode->i_state |= I_REFERENCED;
 }
 
@@ -443,16 +452,11 @@ static void inode_lru_list_add(struct inode *inode)
  */
 void inode_add_lru(struct inode *inode)
 {
-	if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
-				I_FREEING | I_WILL_FREE)) &&
-	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
-		inode_lru_list_add(inode);
+	__inode_add_lru(inode, false);
 }
 
-
 static void inode_lru_list_del(struct inode *inode)
 {
-
 	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
 		this_cpu_dec(nr_unused);
 }
@@ -728,10 +732,6 @@ again:
 /*
  * Isolate the inode from the LRU in preparation for freeing it.
  *
- * Any inodes which are pinned purely because of attached pagecache have their
- * pagecache removed. If the inode has metadata buffers attached to
- * mapping->private_list then try to remove them.
- *
  * If the inode has the I_REFERENCED flag set, then it means that it has been
  * used recently - the flag is set in iput_final(). When we encounter such an
  * inode, clear the flag and move it to the back of the LRU so it gets another
@@ -747,31 +747,39 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 	struct inode *inode = container_of(item, struct inode, i_lru);
 
 	/*
-	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
-	 * If we fail to get the lock, just skip it.
+	 * We are inverting the lru lock/inode->i_lock here, so use a
+	 * trylock. If we fail to get the lock, just skip it.
 	 */
 	if (!spin_trylock(&inode->i_lock))
 		return LRU_SKIP;
 
 	/*
-	 * Referenced or dirty inodes are still in use. Give them another pass
-	 * through the LRU as we canot reclaim them now.
+	 * Inodes can get referenced, redirtied, or repopulated while
+	 * they're already on the LRU, and this can make them
+	 * unreclaimable for a while. Remove them lazily here; iput,
+	 * sync, or the last page cache deletion will requeue them.
 	 */
 	if (atomic_read(&inode->i_count) ||
-	    (inode->i_state & ~I_REFERENCED)) {
+	    (inode->i_state & ~I_REFERENCED) ||
+	    !mapping_shrinkable(&inode->i_data)) {
 		list_lru_isolate(lru, &inode->i_lru);
 		spin_unlock(&inode->i_lock);
 		this_cpu_dec(nr_unused);
 		return LRU_REMOVED;
 	}
 
-	/* recently referenced inodes get one more pass */
+	/* Recently referenced inodes get one more pass */
 	if (inode->i_state & I_REFERENCED) {
 		inode->i_state &= ~I_REFERENCED;
 		spin_unlock(&inode->i_lock);
 		return LRU_ROTATE;
 	}
 
+	/*
+	 * On highmem systems, mapping_shrinkable() permits dropping
+	 * page cache in order to free up struct inodes: lowmem might
+	 * be under pressure before the cache inside the highmem zone.
+	 */
 	if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
 		__iget(inode);
 		spin_unlock(&inode->i_lock);
@@ -1638,7 +1646,7 @@ static void iput_final(struct inode *inode)
 	if (!drop &&
 	    !(inode->i_state & I_DONTCACHE) &&
 	    (sb->s_flags & SB_ACTIVE)) {
-		inode_add_lru(inode);
+		__inode_add_lru(inode, true);
 		spin_unlock(&inode->i_lock);
 		return;
 	}
|
fs/internal.h

@@ -149,7 +149,6 @@ extern int vfs_open(const struct path *, struct file *);
  * inode.c
  */
 extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
-extern void inode_add_lru(struct inode *inode);
 extern int dentry_needs_remove_privs(struct dentry *dentry);
 
 /*
include/linux/fs.h

@@ -3193,6 +3193,7 @@ static inline void remove_inode_hash(struct inode *inode)
 }
 
 extern void inode_sb_list_add(struct inode *inode);
+extern void inode_add_lru(struct inode *inode);
 
 extern int sb_set_blocksize(struct super_block *, int);
 extern int sb_min_blocksize(struct super_block *, int);
include/linux/pagemap.h

@@ -23,6 +23,56 @@ static inline bool mapping_empty(struct address_space *mapping)
 	return xa_empty(&mapping->i_pages);
 }
 
+/*
+ * mapping_shrinkable - test if page cache state allows inode reclaim
+ * @mapping: the page cache mapping
+ *
+ * This checks the mapping's cache state for the pupose of inode
+ * reclaim and LRU management.
+ *
+ * The caller is expected to hold the i_lock, but is not required to
+ * hold the i_pages lock, which usually protects cache state. That's
+ * because the i_lock and the list_lru lock that protect the inode and
+ * its LRU state don't nest inside the irq-safe i_pages lock.
+ *
+ * Cache deletions are performed under the i_lock, which ensures that
+ * when an inode goes empty, it will reliably get queued on the LRU.
+ *
+ * Cache additions do not acquire the i_lock and may race with this
+ * check, in which case we'll report the inode as shrinkable when it
+ * has cache pages. This is okay: the shrinker also checks the
+ * refcount and the referenced bit, which will be elevated or set in
+ * the process of adding new cache pages to an inode.
+ */
+static inline bool mapping_shrinkable(struct address_space *mapping)
+{
+	void *head;
+
+	/*
+	 * On highmem systems, there could be lowmem pressure from the
+	 * inodes before there is highmem pressure from the page
+	 * cache. Make inodes shrinkable regardless of cache state.
+	 */
+	if (IS_ENABLED(CONFIG_HIGHMEM))
+		return true;
+
+	/* Cache completely empty? Shrink away. */
+	head = rcu_access_pointer(mapping->i_pages.xa_head);
+	if (!head)
+		return true;
+
+	/*
+	 * The xarray stores single offset-0 entries directly in the
+	 * head pointer, which allows non-resident page cache entries
+	 * to escape the shadow shrinker's list of xarray nodes. The
+	 * inode shrinker needs to pick them up under memory pressure.
+	 */
+	if (!xa_is_node(head) && xa_is_value(head))
+		return true;
+
+	return false;
+}
+
 /*
  * Bits in mapping->flags.
  */
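Leaving aside the CONFIG_HIGHMEM special case, the head-pointer test above relies on the XArray storing single offset-0 entries inline; an editorial restatement of the cases mapping_shrinkable() distinguishes (not part of the patch):

	void *head = rcu_access_pointer(mapping->i_pages.xa_head);

	if (!head)		/* empty mapping: the inode can be reclaimed */
		return true;
	if (xa_is_node(head))	/* a tree: pages remain, or the shadow shrinker tracks the nodes */
		return false;
	if (xa_is_value(head))	/* a single inline shadow entry: only the inode shrinker frees it */
		return true;
	return false;		/* a single inline page at index 0 */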
mm/filemap.c

@@ -262,9 +262,13 @@ void delete_from_page_cache(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 
 	BUG_ON(!PageLocked(page));
+	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
 	__delete_from_page_cache(page, NULL);
 	xa_unlock_irq(&mapping->i_pages);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
 
 	page_cache_free_page(mapping, page);
 }
@@ -340,6 +344,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	if (!pagevec_count(pvec))
 		return;
 
+	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);
@@ -348,6 +353,9 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	}
 	page_cache_delete_batch(mapping, pvec);
 	xa_unlock_irq(&mapping->i_pages);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
 		page_cache_free_page(mapping, pvec->pages[i]);
mm/truncate.c

@@ -45,9 +45,13 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
 			       void *entry)
 {
+	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
 	__clear_shadow_entry(mapping, index, entry);
 	xa_unlock_irq(&mapping->i_pages);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
 }
 
 /*
@@ -73,8 +77,10 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 		return;
 
 	dax = dax_mapping(mapping);
-	if (!dax)
+	if (!dax) {
+		spin_lock(&mapping->host->i_lock);
 		xa_lock_irq(&mapping->i_pages);
+	}
 
 	for (i = j; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
@@ -93,8 +99,12 @@ static void truncate_exceptional_pvec_entries(struct address_space *mapping,
 			__clear_shadow_entry(mapping, index, page);
 	}
 
-	if (!dax)
+	if (!dax) {
 		xa_unlock_irq(&mapping->i_pages);
+		if (mapping_shrinkable(mapping))
+			inode_add_lru(mapping->host);
+		spin_unlock(&mapping->host->i_lock);
+	}
 	pvec->nr = j;
 }
 
@@ -567,6 +577,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
+	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
 	if (PageDirty(page))
 		goto failed;
@@ -574,6 +585,9 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	BUG_ON(page_has_private(page));
 	__delete_from_page_cache(page, NULL);
 	xa_unlock_irq(&mapping->i_pages);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
 
 	if (mapping->a_ops->freepage)
 		mapping->a_ops->freepage(page);
@@ -582,6 +596,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	return 1;
 failed:
 	xa_unlock_irq(&mapping->i_pages);
+	spin_unlock(&mapping->host->i_lock);
 	return 0;
 }
 
mm/vmscan.c

@@ -1190,6 +1190,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 	BUG_ON(!PageLocked(page));
 	BUG_ON(mapping != page_mapping(page));
 
+	if (!PageSwapCache(page))
+		spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
 	/*
 	 * The non racy check for a busy page.
@@ -1258,6 +1260,9 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		shadow = workingset_eviction(page, target_memcg);
 		__delete_from_page_cache(page, shadow);
 		xa_unlock_irq(&mapping->i_pages);
+		if (mapping_shrinkable(mapping))
+			inode_add_lru(mapping->host);
+		spin_unlock(&mapping->host->i_lock);
 
 		if (freepage != NULL)
 			freepage(page);
@@ -1267,6 +1272,8 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 cannot_free:
 	xa_unlock_irq(&mapping->i_pages);
+	if (!PageSwapCache(page))
+		spin_unlock(&mapping->host->i_lock);
 	return 0;
 }
 
mm/workingset.c

@@ -543,6 +543,13 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out;
 	}
 
+	if (!spin_trylock(&mapping->host->i_lock)) {
+		xa_unlock(&mapping->i_pages);
+		spin_unlock_irq(lru_lock);
+		ret = LRU_RETRY;
+		goto out;
+	}
+
 	list_lru_isolate(lru, item);
 	__dec_lruvec_kmem_state(node, WORKINGSET_NODES);
 
@@ -562,6 +569,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 
 out_invalid:
 	xa_unlock_irq(&mapping->i_pages);
+	if (mapping_shrinkable(mapping))
+		inode_add_lru(mapping->host);
+	spin_unlock(&mapping->host->i_lock);
 	ret = LRU_REMOVED_RETRY;
 out:
 	cond_resched();