mm: add pmd_folio()
Convert directly from a pmd to a folio without going through another
representation first.  For now this is just a slightly shorter way to
write it, but it might end up being more efficient later.

Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit e06d03d559
parent 5beaee54a3
@@ -50,6 +50,8 @@
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #endif
 
+#define pmd_folio(pmd) page_folio(pmd_page(pmd))
+
 /*
  * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD]
  *
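The new helper is purely a composition of two existing conversions: pmd_page() to get the struct page the pmd maps, then page_folio() to get the folio that page belongs to. Below is a minimal user-space sketch of that layering; struct folio, struct page, pmd_t, and memmap here are simplified toy stand-ins for illustration, not the real kernel definitions.

#include <assert.h>

/* Toy stand-ins; the real kernel types are far more involved. */
struct folio { int id; };
struct page  { struct folio *folio; };		/* each page belongs to a folio */
typedef struct { unsigned long pfn; } pmd_t;	/* a huge pmd maps a pfn */

static struct page memmap[8];			/* toy flat memmap, indexed by pfn */

/* Models of the existing helpers this commit builds on. */
static unsigned long pmd_pfn(pmd_t pmd) { return pmd.pfn; }
static struct page *pmd_page(pmd_t pmd) { return &memmap[pmd.pfn]; }
static struct folio *page_folio(struct page *page) { return page->folio; }
static struct folio *pfn_folio(unsigned long pfn) { return memmap[pfn].folio; }

/* The new helper: exactly the composition added in the hunk above. */
#define pmd_folio(pmd) page_folio(pmd_page(pmd))

int main(void)
{
	struct folio f = { .id = 42 };
	pmd_t pmd = { .pfn = 3 };

	memmap[3].folio = &f;

	/* Both old spellings and the new one reach the same folio. */
	assert(page_folio(pmd_page(pmd)) == &f);
	assert(pfn_folio(pmd_pfn(pmd)) == &f);
	assert(pmd_folio(pmd) == &f);
	return 0;
}

Since every caller now funnels through this one macro, a later change to the underlying conversion (the "might end up being more efficient later" in the commit message) only has to touch this single definition.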
@@ -1816,7 +1816,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	folio = pfn_folio(pmd_pfn(orig_pmd));
+	folio = pmd_folio(orig_pmd);
 	/*
 	 * If other processes are mapping this folio, we couldn't discard
 	 * the folio unless they all do MADV_FREE so let's skip the folio.
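Note that this hunk (and several later ones) replaces the other old spelling, pfn_folio(pmd_pfn(...)), which detours through a page frame number rather than a struct page. For a present huge pmd both detours name the same folio, so collapsing them into pmd_folio() is behaviour-preserving; the toy model above asserts exactly that equivalence.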
@@ -2086,7 +2086,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		if (pmd_protnone(*pmd))
 			goto unlock;
 
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		toptier = node_is_toptier(folio_nid(folio));
 		/*
 		 * Skip scanning top tier node if normal numa
@@ -2663,7 +2663,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	 * It's safe to call pmd_page when folio is set because it's
 	 * guaranteed that pmd is present.
 	 */
-	if (folio && folio != page_folio(pmd_page(*pmd)))
+	if (folio && folio != pmd_folio(*pmd))
 		goto out;
 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 }
@@ -363,7 +363,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			goto huge_unlock;
 		}
 
-		folio = pfn_folio(pmd_pfn(orig_pmd));
+		folio = pmd_folio(orig_pmd);
 
 		/* Do not interfere with other mappings of this folio */
 		if (folio_likely_mapped_shared(folio))
@@ -509,7 +509,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk)
 		qp->nr_failed++;
 		return;
 	}
-	folio = pfn_folio(pmd_pfn(*pmd));
+	folio = pmd_folio(*pmd);
 	if (is_huge_zero_folio(folio)) {
 		walk->action = ACTION_CONTINUE;
 		return;
@@ -378,7 +378,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 			goto out;
 		if (is_huge_zero_pmd(*pmd))
 			goto out;
-		folio = page_folio(pmd_page(*pmd));
+		folio = pmd_folio(*pmd);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_folio(folio);
 		else
@@ -1662,7 +1662,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start,
 		/* Check if we can move the pmd without splitting it. */
 		if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) ||
 		    !pmd_none(dst_pmdval)) {
-			struct folio *folio = pfn_folio(pmd_pfn(*src_pmd));
+			struct folio *folio = pmd_folio(*src_pmd);
 
 			if (!folio || (!is_huge_zero_folio(folio) &&
 				       !PageAnonExclusive(&folio->page))) {