mm: convert mm_counter() to take a folio
Now that all callers of mm_counter() have a folio, convert mm_counter() to take a folio.  Saves a call to compound_head() hidden inside PageAnon().

Link: https://lkml.kernel.org/r/20240111152429.3374566-10-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit a23f517b0e
parent eabafaaa95
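The change is mechanical at every call site; a minimal sketch of the before/after pattern (illustrative only, "mm" and "folio" stand in for whatever each caller already has):

    /* Before: mm_counter() took a struct page, so folio-based callers
     * passed &folio->page; PageAnon() then re-derived the folio via
     * compound_head(). */
    dec_mm_counter(mm, mm_counter(&folio->page));

    /* After: the folio is passed directly and mm_counter() uses
     * folio_test_anon(), so the compound_head() call disappears. */
    dec_mm_counter(mm, mm_counter(folio));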
@@ -723,7 +723,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 	else if (is_migration_entry(entry)) {
 		struct folio *folio = pfn_swap_entry_folio(entry);
 
-		dec_mm_counter(mm, mm_counter(&folio->page));
+		dec_mm_counter(mm, mm_counter(folio));
 	}
 	free_swap_and_cache(entry);
 }
@@ -2603,11 +2603,11 @@ static inline int mm_counter_file(struct page *page)
 	return MM_FILEPAGES;
 }
 
-static inline int mm_counter(struct page *page)
+static inline int mm_counter(struct folio *folio)
 {
-	if (PageAnon(page))
+	if (folio_test_anon(folio))
 		return MM_ANONPAGES;
-	return mm_counter_file(page);
+	return mm_counter_file(&folio->page);
 }
 
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
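Note that mm_counter_file() still takes a struct page at this point, which is why the non-anonymous case passes &folio->page. A caller that only holds a struct page can still use the new interface by going through page_folio() first; a minimal sketch under that assumption (acct_mapped_page() is a made-up name, not part of this patch):

    /* Hypothetical helper: account a mapped page against the right
     * RSS counter by converting it to its folio first. */
    static inline void acct_mapped_page(struct mm_struct *mm, struct page *page)
    {
    	inc_mm_counter(mm, mm_counter(page_folio(page)));
    }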
mm/memory.c
@@ -808,7 +808,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	} else if (is_migration_entry(entry)) {
 		folio = pfn_swap_entry_folio(entry);
 
-		rss[mm_counter(&folio->page)]++;
+		rss[mm_counter(folio)]++;
 
 		if (!is_readable_migration_entry(entry) &&
 		    is_cow_mapping(vm_flags)) {
@@ -840,7 +840,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		 * keep things as they are.
 		 */
 		folio_get(folio);
-		rss[mm_counter(page)]++;
+		rss[mm_counter(folio)]++;
 		/* Cannot fail as these pages cannot get pinned. */
 		folio_try_dup_anon_rmap_pte(folio, page, src_vma);
 
@@ -1476,7 +1476,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				if (pte_young(ptent) && likely(vma_has_recency(vma)))
 					folio_mark_accessed(folio);
 			}
-			rss[mm_counter(page)]--;
+			rss[mm_counter(folio)]--;
 			if (!delay_rmap) {
 				folio_remove_rmap_pte(folio, page, vma);
 				if (unlikely(page_mapcount(page) < 0))
@@ -1504,7 +1504,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			 * see zap_install_uffd_wp_if_needed().
 			 */
 			WARN_ON_ONCE(!vma_is_anonymous(vma));
-			rss[mm_counter(page)]--;
+			rss[mm_counter(folio)]--;
 			if (is_device_private_entry(entry))
 				folio_remove_rmap_pte(folio, page, vma);
 			folio_put(folio);
@@ -1519,7 +1519,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			folio = pfn_swap_entry_folio(entry);
 			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(&folio->page)]--;
+			rss[mm_counter(folio)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only
@@ -1780,7 +1780,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -1795,7 +1795,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else if (folio_test_anon(folio)) {
 			swp_entry_t entry = page_swap_entry(subpage);
 			pte_t swp_pte;
@@ -2181,7 +2181,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 				set_huge_pte_at(mm, address, pvmw.pte, pteval,
 						hsz);
 			} else {
-				dec_mm_counter(mm, mm_counter(&folio->page));
+				dec_mm_counter(mm, mm_counter(folio));
 				set_pte_at(mm, address, pvmw.pte, pteval);
 			}
 
@@ -2196,7 +2196,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			 * migration) will not expect userfaults on already
 			 * copied pages.
 			 */
-			dec_mm_counter(mm, mm_counter(&folio->page));
+			dec_mm_counter(mm, mm_counter(folio));
 		} else {
 			swp_entry_t entry;
 			pte_t swp_pte;
@@ -124,7 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd,
 	 * Must happen after rmap, as mm_counter() checks mapping (via
 	 * PageAnon()), which is set by __page_set_anon_rmap().
 	 */
-	inc_mm_counter(dst_mm, mm_counter(page));
+	inc_mm_counter(dst_mm, mm_counter(folio));
 
 	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
 