mm/rmap: convert page_dup_file_rmap() to folio_dup_file_rmap_[pte|ptes|pmd]()
Let's convert page_dup_file_rmap() like the other rmap functions. As
there is only a single caller, convert that single caller right away
and remove page_dup_file_rmap().

Add folio_dup_file_rmap_ptes() right away; we want to perform rmap
batching during fork() soon.

Link: https://lkml.kernel.org/r/20231220224504.646757-34-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 4d8f7418e8
commit d8ef5e311d
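For orientation, the shape of the call-site conversion is as follows (a condensed sketch of the mm/memory.c hunk further below, not extra code from the patch):

/* before: page-based, the "compound" flag selects PTE vs. PMD granularity */
page_dup_file_rmap(page, false);

/* after: folio-based, the granularity is encoded in the function name */
folio_dup_file_rmap_pte(folio, page);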
@@ -308,6 +308,60 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
 	atomic_dec(&folio->_entire_mapcount);
 }
 
+static __always_inline void __folio_dup_file_rmap(struct folio *folio,
+		struct page *page, int nr_pages, enum rmap_level level)
+{
+	__folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+	switch (level) {
+	case RMAP_LEVEL_PTE:
+		do {
+			atomic_inc(&page->_mapcount);
+		} while (page++, --nr_pages > 0);
+		break;
+	case RMAP_LEVEL_PMD:
+		atomic_inc(&folio->_entire_mapcount);
+		break;
+	}
+}
+
+/**
+ * folio_dup_file_rmap_ptes - duplicate PTE mappings of a page range of a folio
+ * @folio: The folio to duplicate the mappings of
+ * @page: The first page to duplicate the mappings of
+ * @nr_pages: The number of pages of which the mapping will be duplicated
+ *
+ * The page range of the folio is defined by [page, page + nr_pages)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_ptes(struct folio *folio,
+		struct page *page, int nr_pages)
+{
+	__folio_dup_file_rmap(folio, page, nr_pages, RMAP_LEVEL_PTE);
+}
+#define folio_dup_file_rmap_pte(folio, page) \
+	folio_dup_file_rmap_ptes(folio, page, 1)
+
+/**
+ * folio_dup_file_rmap_pmd - duplicate a PMD mapping of a page range of a folio
+ * @folio: The folio to duplicate the mapping of
+ * @page: The first page to duplicate the mapping of
+ *
+ * The page range of the folio is defined by [page, page + HPAGE_PMD_NR)
+ *
+ * The caller needs to hold the page table lock.
+ */
+static inline void folio_dup_file_rmap_pmd(struct folio *folio,
+		struct page *page)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	__folio_dup_file_rmap(folio, page, HPAGE_PMD_NR, RMAP_LEVEL_PMD);
+#else
+	WARN_ON_ONCE(true);
+#endif
+}
+
 static inline void __page_dup_rmap(struct page *page, bool compound)
 {
 	VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
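The commit message's note about rmap batching during fork() explains why folio_dup_file_rmap_ptes() is added without a user yet: at RMAP_LEVEL_PTE every subpage's _mapcount is incremented individually, so a caller that already knows that nr consecutive PTEs map the same folio can duplicate all of them in one call. A hypothetical sketch of such a batched fork path, assuming a helper that computed the batch size nr (not part of this patch):

/* hypothetical batched fork path; 'nr' comes from an assumed PTE-batch helper */
folio_ref_add(folio, nr);			/* one reference per copied PTE */
folio_dup_file_rmap_ptes(folio, page, nr);	/* one _mapcount bump per subpage */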
@@ -322,11 +376,6 @@ static inline void __page_dup_rmap(struct page *page, bool compound)
 	}
 }
 
-static inline void page_dup_file_rmap(struct page *page, bool compound)
-{
-	__page_dup_rmap(page, compound);
-}
-
 /**
  * page_try_dup_anon_rmap - try duplicating a mapping of an already mapped
  * anonymous page
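Note that __page_dup_rmap() itself survives this hunk: as the trailing context shows, page_try_dup_anon_rmap() still builds on it, so only the file-backed wrapper is removed. At this point in the series the anonymous fork path still looks roughly like this (a sketch based on the surrounding context, not part of the patch):

/* anon side, still page-based here; may fail if the page could be pinned */
if (unlikely(page_try_dup_anon_rmap(page, false, src_vma))) {
	/* fall back to copying the page for the child */
}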
@@ -965,7 +965,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 		rss[MM_ANONPAGES]++;
 	} else if (page) {
 		folio_get(folio);
-		page_dup_file_rmap(page, false);
+		folio_dup_file_rmap_pte(folio, page);
 		rss[mm_counter_file(page)]++;
 	}
 
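The converted caller pairs two separate counters: folio_get() takes a reference for the new page table entry, while folio_dup_file_rmap_pte() records the new mapping in the folio's mapcount. For contrast, the matching teardown on the unmap side, using the remove-side helpers from the same series (a simplified sketch; the real zap path defers the reference drop):

/* simplified unmap counterpart: undo the mapping record, then the reference */
folio_remove_rmap_pte(folio, page, vma);
folio_put(folio);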