mm/filemap: fix storing to a THP shadow entry
commit 198b62f83e upstream.

When a THP is removed from the page cache by reclaim, we replace it
with a shadow entry that occupies all slots of the XArray previously
occupied by the THP.  If the user then accesses that page again, we
only allocate a single page, but storing it into the shadow entry
replaces all entries with that one page.  That leads to bugs like

page dumped because: VM_BUG_ON_PAGE(page_to_pgoff(page) != offset)
------------[ cut here ]------------
kernel BUG at mm/filemap.c:2529!

https://bugzilla.kernel.org/show_bug.cgi?id=206569

This is hard to reproduce with mainline, but happens regularly with the
THP patchset (as so many more THPs are created).  This solution is
taken from the THP patchset.  It splits the shadow entry into order-0
pieces at the time that we bring a new page into cache.

Fixes: 99cb0dbd47 ("mm,thp: add read-only THP support for (non-shmem) FS")
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Qian Cai <cai@lca.pw>
Link: https://lkml.kernel.org/r/20200903183029.14930-4-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 mm/filemap.c | 35
@@ -856,7 +856,6 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	int huge = PageHuge(page);
 	struct mem_cgroup *memcg;
 	int error;
-	void *old;
 
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -872,21 +871,41 @@ noinline int __add_to_page_cache_locked(struct page *page,
 	get_page(page);
 	page->mapping = mapping;
 	page->index = offset;
+	gfp_mask &= GFP_RECLAIM_MASK;
 
 	do {
+		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
+		void *entry, *old = NULL;
+
+		if (order > thp_order(page))
+			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
+					order, gfp_mask);
 		xas_lock_irq(&xas);
-		old = xas_load(&xas);
-		if (old && !xa_is_value(old))
-			xas_set_err(&xas, -EEXIST);
+		xas_for_each_conflict(&xas, entry) {
+			old = entry;
+			if (!xa_is_value(entry)) {
+				xas_set_err(&xas, -EEXIST);
+				goto unlock;
+			}
+		}
+
+		if (old) {
+			if (shadowp)
+				*shadowp = old;
+			/* entry may have been split before we acquired lock */
+			order = xa_get_order(xas.xa, xas.xa_index);
+			if (order > thp_order(page)) {
+				xas_split(&xas, old, order);
+				xas_reset(&xas);
+			}
+		}
+
 		xas_store(&xas, page);
 		if (xas_error(&xas))
 			goto unlock;
 
-		if (xa_is_value(old)) {
+		if (old)
 			mapping->nrexceptional--;
-			if (shadowp)
-				*shadowp = old;
-		}
 		mapping->nrpages++;
 
 		/* hugetlb pages do not participate in page cache accounting */
@@ -894,7 +913,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
 		__inc_node_page_state(page, NR_FILE_PAGES);
 unlock:
 		xas_unlock_irq(&xas);
-	} while (xas_nomem(&xas, gfp_mask & GFP_RECLAIM_MASK));
+	} while (xas_nomem(&xas, gfp_mask));
 
 	if (xas_error(&xas))
 		goto error;
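Taken together, the patched store path looks roughly like the sketch below
(condensed from the hunks above; the -EEXIST conflict handling, shadow
accounting and memcg/statistics updates are omitted, so this is a simplified
view rather than the literal function).  The reason for the two
xa_get_order() calls is that xas_split_alloc() may allocate nodes and
therefore has to run before xas_lock_irq() is taken, while the order must be
re-checked under the lock because the entry may already have been split in
the meantime.

/* Condensed view of the new store loop -- see the hunks above for the real code. */
do {
	unsigned int order = xa_get_order(xas.xa, xas.xa_index);
	void *entry, *old = NULL;

	/* xas_split_alloc() may allocate, so it runs before the spinlock. */
	if (order > thp_order(page))
		xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
				order, gfp_mask);

	xas_lock_irq(&xas);
	xas_for_each_conflict(&xas, entry)
		old = entry;			/* the shadow entry, if any */

	if (old) {
		/* The entry may have been split while we were unlocked. */
		order = xa_get_order(xas.xa, xas.xa_index);
		if (order > thp_order(page)) {
			xas_split(&xas, old, order);	/* uses pre-allocated nodes */
			xas_reset(&xas);
		}
	}

	xas_store(&xas, page);		/* now replaces a single order-0 slot */
	xas_unlock_irq(&xas);
} while (xas_nomem(&xas, gfp_mask));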