be2d575638
Patch series "Change the return value for page isolation functions", v3. Now the page isolation functions did not return a boolean to indicate success or not, instead it will return a negative error when failed to isolate a page. So below code used in most places seem a boolean success/failure thing, which can confuse people whether the isolation is successful. if (folio_isolate_lru(folio)) continue; Moreover the page isolation functions only return 0 or -EBUSY, and most users did not care about the negative error except for few users, thus we can convert all page isolation functions to return a boolean value, which can remove the confusion to make code more clear. No functional changes intended in this patch series. This patch (of 4): Now the folio_isolate_lru() did not return a boolean value to indicate isolation success or not, however below code checking the return value can make people think that it was a boolean success/failure thing, which makes it easy for people to make mistakes (see the fix patch[1]). if (folio_isolate_lru(folio)) continue; Thus it's better to check the negative error value explicitly returned by folio_isolate_lru(), which makes code more clear per Linus's suggestion[2]. Moreover Matthew suggested we can convert the isolation functions to return a boolean[3], since most users did not care about the negative error value, and it can also remove the confusion of checking the return value. So this patch converts the folio_isolate_lru() to return a boolean value, which means return 'true' to indicate the folio isolation is successful, and 'false' means a failure to isolate. Meanwhile changing all users' logic of checking the isolation state. No functional changes intended. 
[1] https://lore.kernel.org/all/20230131063206.28820-1-Kuan-Ying.Lee@mediatek.com/T/#u [2] https://lore.kernel.org/all/CAHk-=wiBrY+O-4=2mrbVyxR+hOqfdJ=Do6xoucfJ9_5az01L4Q@mail.gmail.com/ [3] https://lore.kernel.org/all/Y+sTFqwMNAjDvxw3@casper.infradead.org/ Link: https://lkml.kernel.org/r/cover.1676424378.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/8a4e3679ed4196168efadf7ea36c038f2f7d5aa9.1676424378.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com> Reviewed-by: SeongJae Park <sj@kernel.org> Acked-by: David Hildenbrand <david@redhat.com> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org> Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Muchun Song <muchun.song@linux.dev> Cc: Naoya Horiguchi <naoya.horiguchi@nec.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Roman Gushchin <roman.gushchin@linux.dev> Cc: Shakeel Butt <shakeelb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
143 lines
3.3 KiB
C
143 lines
3.3 KiB
C
/*
|
|
* Compatibility functions which bloat the callers too much to make inline.
|
|
* All of the callers of these functions should be converted to use folios
|
|
* eventually.
|
|
*/
|
|
|
|
#include <linux/migrate.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/rmap.h>
|
|
#include <linux/swap.h>
|
|
#include "internal.h"
|
|
|
|
/*
 * page_mapping - return the address_space this page belongs to.
 *
 * Legacy page-based wrapper; resolves the page's folio and defers to
 * folio_mapping().
 */
struct address_space *page_mapping(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mapping(folio);
}
EXPORT_SYMBOL(page_mapping);
|
|
|
|
/*
 * unlock_page - unlock the folio containing this page.
 *
 * Legacy page-based wrapper around folio_unlock().
 */
void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);
|
|
|
|
/*
 * end_page_writeback - signal that writeback of this page has completed.
 *
 * Legacy page-based wrapper around folio_end_writeback().
 */
void end_page_writeback(struct page *page)
{
	folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);
|
|
|
|
/*
 * wait_on_page_writeback - block until writeback of this page finishes.
 *
 * Legacy page-based wrapper around folio_wait_writeback().
 */
void wait_on_page_writeback(struct page *page)
{
	folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);
|
|
|
|
/*
 * wait_for_stable_page - wait until the page's contents may be modified.
 *
 * Legacy page-based wrapper around folio_wait_stable().
 */
void wait_for_stable_page(struct page *page)
{
	folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);
|
|
|
|
/*
 * mark_page_accessed - record a reference to this page.
 *
 * Legacy page-based wrapper around folio_mark_accessed().
 */
void mark_page_accessed(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_mark_accessed(folio);
}
EXPORT_SYMBOL(mark_page_accessed);
|
|
|
|
/*
 * set_page_writeback - mark this page as under writeback.
 *
 * Legacy page-based wrapper around folio_start_writeback(); returns its
 * result unchanged.
 */
bool set_page_writeback(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_start_writeback(folio);
}
EXPORT_SYMBOL(set_page_writeback);
|
|
|
|
/*
 * set_page_dirty - mark this page dirty.
 *
 * Legacy page-based wrapper around folio_mark_dirty(); returns its
 * result unchanged.
 */
bool set_page_dirty(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mark_dirty(folio);
}
EXPORT_SYMBOL(set_page_dirty);
|
|
|
|
/*
 * __set_page_dirty_nobuffers - dirty a page without buffer-head bookkeeping.
 *
 * Legacy page-based wrapper around filemap_dirty_folio().
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	struct folio *folio = page_folio(page);

	return filemap_dirty_folio(page_mapping(page), folio);
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
|
|
|
|
/*
 * clear_page_dirty_for_io - clear the dirty flag before starting I/O.
 *
 * Legacy page-based wrapper around folio_clear_dirty_for_io(); returns
 * its result unchanged.
 */
bool clear_page_dirty_for_io(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_clear_dirty_for_io(folio);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);
|
|
|
|
/*
 * redirty_page_for_writepage - re-dirty a page that could not be written now.
 *
 * Legacy page-based wrapper around folio_redirty_for_writepage(); returns
 * its result unchanged.
 */
bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_redirty_for_writepage(wbc, folio);
}
EXPORT_SYMBOL(redirty_page_for_writepage);
|
|
|
|
/*
 * lru_cache_add_inactive_or_unevictable - add a page to the appropriate LRU.
 *
 * Legacy page-based wrapper around folio_add_lru_vma().
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);

	folio_add_lru_vma(folio, vma);
}
|
|
|
|
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
|
|
pgoff_t index, gfp_t gfp)
|
|
{
|
|
return filemap_add_folio(mapping, page_folio(page), index, gfp);
|
|
}
|
|
EXPORT_SYMBOL(add_to_page_cache_lru);
|
|
|
|
/*
 * pagecache_get_page - legacy page-based front end for __filemap_get_folio().
 *
 * Looks up (and, depending on @fgp_flags, possibly creates/locks) the entry
 * at @index in @mapping, then converts the resulting folio back to the
 * precise page the caller asked for.
 */
noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	/*
	 * &folio->page relies on "page" being the first member of struct
	 * folio: for a NULL folio this yields NULL, and for an xarray value
	 * entry (shadow/swap) it passes the encoded value through unchanged,
	 * preserving the historical page-based return convention.
	 */
	if (!folio || xa_is_value(folio))
		return &folio->page;
	/* Real folio: return the sub-page within it that covers @index. */
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);
|
|
|
|
struct page *grab_cache_page_write_begin(struct address_space *mapping,
|
|
pgoff_t index)
|
|
{
|
|
unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
|
|
|
|
return pagecache_get_page(mapping, index, fgp_flags,
|
|
mapping_gfp_mask(mapping));
|
|
}
|
|
EXPORT_SYMBOL(grab_cache_page_write_begin);
|
|
|
|
int isolate_lru_page(struct page *page)
|
|
{
|
|
bool ret;
|
|
|
|
if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
|
|
return -EBUSY;
|
|
ret = folio_isolate_lru((struct folio *)page);
|
|
if (ret)
|
|
return 0;
|
|
|
|
return -EBUSY;
|
|
}
|
|
|
|
/*
 * putback_lru_page - return a previously isolated page to its LRU list.
 *
 * Legacy page-based wrapper around folio_putback_lru().
 */
void putback_lru_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_putback_lru(folio);
}
|
|
|
|
#ifdef CONFIG_MMU
/*
 * page_add_new_anon_rmap - add anon rmap for a freshly allocated page.
 *
 * Legacy page-based wrapper around folio_add_new_anon_rmap(). Tail pages
 * are rejected by the VM_BUG_ON; the cast is then safe because @page is a
 * head page.
 */
void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	folio_add_new_anon_rmap((struct folio *)page, vma, address);
}
#endif
|