mm: constify more page/folio tests
Constify the flag tests that aren't automatically generated and the
tests that look like flag tests but are more complicated.

Link: https://lkml.kernel.org/r/20240227192337.757313-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
ce3467af6b
commit
29cfe7556b
@ -558,13 +558,13 @@ PAGEFLAG_FALSE(HighMem, highmem)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SWAP
|
||||
static __always_inline bool folio_test_swapcache(struct folio *folio)
|
||||
static __always_inline bool folio_test_swapcache(const struct folio *folio)
|
||||
{
|
||||
return folio_test_swapbacked(folio) &&
|
||||
test_bit(PG_swapcache, folio_flags(folio, 0));
|
||||
test_bit(PG_swapcache, const_folio_flags(folio, 0));
|
||||
}
|
||||
|
||||
static __always_inline bool PageSwapCache(struct page *page)
|
||||
static __always_inline bool PageSwapCache(const struct page *page)
|
||||
{
|
||||
return folio_test_swapcache(page_folio(page));
|
||||
}
|
||||
@ -663,22 +663,22 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
|
||||
*/
|
||||
#define PAGE_MAPPING_DAX_SHARED ((void *)0x1)
|
||||
|
||||
static __always_inline bool folio_mapping_flags(struct folio *folio)
|
||||
static __always_inline bool folio_mapping_flags(const struct folio *folio)
|
||||
{
|
||||
return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
|
||||
}
|
||||
|
||||
static __always_inline int PageMappingFlags(struct page *page)
|
||||
static __always_inline int PageMappingFlags(const struct page *page)
|
||||
{
|
||||
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool folio_test_anon(struct folio *folio)
|
||||
static __always_inline bool folio_test_anon(const struct folio *folio)
|
||||
{
|
||||
return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
|
||||
}
|
||||
|
||||
static __always_inline bool PageAnon(struct page *page)
|
||||
static __always_inline bool PageAnon(const struct page *page)
|
||||
{
|
||||
return folio_test_anon(page_folio(page));
|
||||
}
|
||||
@ -689,7 +689,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio)
|
||||
PAGE_MAPPING_MOVABLE;
|
||||
}
|
||||
|
||||
static __always_inline int __PageMovable(struct page *page)
|
||||
static __always_inline int __PageMovable(const struct page *page)
|
||||
{
|
||||
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
|
||||
PAGE_MAPPING_MOVABLE;
|
||||
@ -702,13 +702,13 @@ static __always_inline int __PageMovable(struct page *page)
|
||||
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
|
||||
* anon_vma, but to that page's node of the stable tree.
|
||||
*/
|
||||
static __always_inline bool folio_test_ksm(struct folio *folio)
|
||||
static __always_inline bool folio_test_ksm(const struct folio *folio)
|
||||
{
|
||||
return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
|
||||
PAGE_MAPPING_KSM;
|
||||
}
|
||||
|
||||
static __always_inline bool PageKsm(struct page *page)
|
||||
static __always_inline bool PageKsm(const struct page *page)
|
||||
{
|
||||
return folio_test_ksm(page_folio(page));
|
||||
}
|
||||
@ -747,9 +747,9 @@ static inline bool folio_xor_flags_has_waiters(struct folio *folio,
|
||||
* some of the bytes in it may be; see the is_partially_uptodate()
|
||||
* address_space operation.
|
||||
*/
|
||||
static inline bool folio_test_uptodate(struct folio *folio)
|
||||
static inline bool folio_test_uptodate(const struct folio *folio)
|
||||
{
|
||||
bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
|
||||
bool ret = test_bit(PG_uptodate, const_folio_flags(folio, 0));
|
||||
/*
|
||||
* Must ensure that the data we read out of the folio is loaded
|
||||
* _after_ we've loaded folio->flags to check the uptodate bit.
|
||||
@ -764,7 +764,7 @@ static inline bool folio_test_uptodate(struct folio *folio)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int PageUptodate(struct page *page)
|
||||
static inline int PageUptodate(const struct page *page)
|
||||
{
|
||||
return folio_test_uptodate(page_folio(page));
|
||||
}
|
||||
@ -806,9 +806,9 @@ void set_page_writeback(struct page *page);
|
||||
#define folio_start_writeback_keepwrite(folio) \
|
||||
__folio_start_writeback(folio, true)
|
||||
|
||||
static __always_inline bool folio_test_head(struct folio *folio)
|
||||
static __always_inline bool folio_test_head(const struct folio *folio)
|
||||
{
|
||||
return test_bit(PG_head, folio_flags(folio, FOLIO_PF_ANY));
|
||||
return test_bit(PG_head, const_folio_flags(folio, FOLIO_PF_ANY));
|
||||
}
|
||||
|
||||
static __always_inline int PageHead(const struct page *page)
|
||||
@ -827,7 +827,7 @@ CLEARPAGEFLAG(Head, head, PF_ANY)
|
||||
*
|
||||
* Return: True if the folio is larger than one page.
|
||||
*/
|
||||
static inline bool folio_test_large(struct folio *folio)
|
||||
static inline bool folio_test_large(const struct folio *folio)
|
||||
{
|
||||
return folio_test_head(folio);
|
||||
}
|
||||
@ -856,7 +856,7 @@ TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
|
||||
#define PG_head_mask ((1UL << PG_head))
|
||||
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
int PageHuge(struct page *page);
|
||||
int PageHuge(const struct page *page);
|
||||
SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
|
||||
CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
|
||||
|
||||
@ -869,10 +869,10 @@ CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
|
||||
* Return: True for hugetlbfs folios, false for anon folios or folios
|
||||
* belonging to other filesystems.
|
||||
*/
|
||||
static inline bool folio_test_hugetlb(struct folio *folio)
|
||||
static inline bool folio_test_hugetlb(const struct folio *folio)
|
||||
{
|
||||
return folio_test_large(folio) &&
|
||||
test_bit(PG_hugetlb, folio_flags(folio, 1));
|
||||
test_bit(PG_hugetlb, const_folio_flags(folio, 1));
|
||||
}
|
||||
#else
|
||||
TESTPAGEFLAG_FALSE(Huge, hugetlb)
|
||||
@ -887,7 +887,7 @@ TESTPAGEFLAG_FALSE(Huge, hugetlb)
|
||||
* hugetlbfs pages, but not normal pages. PageTransHuge() can only be
|
||||
* called only in the core VM paths where hugetlbfs pages can't exist.
|
||||
*/
|
||||
static inline int PageTransHuge(struct page *page)
|
||||
static inline int PageTransHuge(const struct page *page)
|
||||
{
|
||||
VM_BUG_ON_PAGE(PageTail(page), page);
|
||||
return PageHead(page);
|
||||
@ -898,7 +898,7 @@ static inline int PageTransHuge(struct page *page)
|
||||
* and hugetlbfs pages, so it should only be called when it's known
|
||||
* that hugetlbfs pages aren't involved.
|
||||
*/
|
||||
static inline int PageTransCompound(struct page *page)
|
||||
static inline int PageTransCompound(const struct page *page)
|
||||
{
|
||||
return PageCompound(page);
|
||||
}
|
||||
@ -908,7 +908,7 @@ static inline int PageTransCompound(struct page *page)
|
||||
* and hugetlbfs pages, so it should only be called when it's known
|
||||
* that hugetlbfs pages aren't involved.
|
||||
*/
|
||||
static inline int PageTransTail(struct page *page)
|
||||
static inline int PageTransTail(const struct page *page)
|
||||
{
|
||||
return PageTail(page);
|
||||
}
|
||||
@ -972,7 +972,7 @@ static inline int page_type_has_type(unsigned int page_type)
|
||||
return (int)page_type < PAGE_MAPCOUNT_RESERVE;
|
||||
}
|
||||
|
||||
static inline int page_has_type(struct page *page)
|
||||
static inline int page_has_type(const struct page *page)
|
||||
{
|
||||
return page_type_has_type(page->page_type);
|
||||
}
|
||||
@ -1056,7 +1056,7 @@ extern bool is_free_buddy_page(struct page *page);
|
||||
|
||||
PAGEFLAG(Isolated, isolated, PF_ANY);
|
||||
|
||||
static __always_inline int PageAnonExclusive(struct page *page)
|
||||
static __always_inline int PageAnonExclusive(const struct page *page)
|
||||
{
|
||||
VM_BUG_ON_PGFLAGS(!PageAnon(page), page);
|
||||
VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page);
|
||||
@ -1129,12 +1129,12 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
|
||||
* Determine if a page has private stuff, indicating that release routines
|
||||
* should be invoked upon it.
|
||||
*/
|
||||
static inline int page_has_private(struct page *page)
|
||||
static inline int page_has_private(const struct page *page)
|
||||
{
|
||||
return !!(page->flags & PAGE_FLAGS_PRIVATE);
|
||||
}
|
||||
|
||||
static inline bool folio_has_private(struct folio *folio)
|
||||
static inline bool folio_has_private(const struct folio *folio)
|
||||
{
|
||||
return page_has_private(&folio->page);
|
||||
}
|
||||
|
@ -2164,9 +2164,9 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
|
||||
* transparent huge pages. See the PageTransHuge() documentation for more
|
||||
* details.
|
||||
*/
|
||||
int PageHuge(struct page *page)
|
||||
int PageHuge(const struct page *page)
|
||||
{
|
||||
struct folio *folio;
|
||||
const struct folio *folio;
|
||||
|
||||
if (!PageCompound(page))
|
||||
return 0;
|
||||
|
Loading…
x
Reference in New Issue
Block a user