mm/ksm: use folio in write_protect_page
A compound page is checked and skipped before write_protect_page() is
called, so use a folio here to save a few compound_head() calls.

Link: https://lkml.kernel.org/r/20240411061713.1847574-8-alexs@kernel.org
Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 40d707f33d
parent 72556a4c06
 mm/ksm.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
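For context on the payoff: a page-based helper such as PageSwapCache()
has to resolve the head page via compound_head() on every call before it
can look at the flags, while a folio is by definition already the head,
so a folio-based helper reads its fields directly. A minimal userspace
sketch of that difference follows; the toy_* types and helpers are
illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins: a "folio" is just a head page, so folio-based
 * helpers can skip the head lookup that page-based helpers repeat. */
struct toy_page {
	struct toy_page *head;   /* models compound_head() */
	bool swapcache;
};
struct toy_folio {
	struct toy_page page;    /* a folio wraps its head page */
};

/* Page flavour: every call pays for the head-page lookup. */
static bool toy_page_swapcache(struct toy_page *page)
{
	return page->head->swapcache;
}

/* Folio flavour: the lookup was done once, up front. */
static bool toy_folio_test_swapcache(struct toy_folio *folio)
{
	return folio->page.swapcache;
}

int main(void)
{
	struct toy_folio f = { .page = { .swapcache = true } };

	f.page.head = &f.page;   /* a small folio's head is itself */
	printf("page helper:  %d\n", toy_page_swapcache(&f.page));
	printf("folio helper: %d\n", toy_folio_test_swapcache(&f));
	return 0;
}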
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1289,23 +1289,24 @@ static u32 calc_checksum(struct page *page)
 	return checksum;
 }
 
-static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
 			      pte_t *orig_pte)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
 	int swapped;
 	int err = -EFAULT;
 	struct mmu_notifier_range range;
 	bool anon_exclusive;
 	pte_t entry;
 
-	pvmw.address = page_address_in_vma(page, vma);
+	if (WARN_ON_ONCE(folio_test_large(folio)))
+		return err;
+
+	pvmw.address = page_address_in_vma(&folio->page, vma);
 	if (pvmw.address == -EFAULT)
 		goto out;
 
-	BUG_ON(PageTransCompound(page));
-
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
 				pvmw.address + PAGE_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
@@ -1315,12 +1316,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 	if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
 		goto out_unlock;
 
-	anon_exclusive = PageAnonExclusive(page);
+	anon_exclusive = PageAnonExclusive(&folio->page);
 	entry = ptep_get(pvmw.pte);
 	if (pte_write(entry) || pte_dirty(entry) ||
 	    anon_exclusive || mm_tlb_flush_pending(mm)) {
-		swapped = PageSwapCache(page);
-		flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+		swapped = folio_test_swapcache(folio);
+		flush_cache_page(vma, pvmw.address, folio_pfn(folio));
 		/*
 		 * Ok this is tricky, when get_user_pages_fast() run it doesn't
 		 * take any lock, therefore the check that we are going to make
@@ -1340,20 +1341,20 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		 * Check that no O_DIRECT or similar I/O is in progress on the
 		 * page
 		 */
-		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+		if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
 		}
 
 		/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
 		if (anon_exclusive &&
-		    folio_try_share_anon_rmap_pte(page_folio(page), page)) {
+		    folio_try_share_anon_rmap_pte(folio, &folio->page)) {
 			set_pte_at(mm, pvmw.address, pvmw.pte, entry);
 			goto out_unlock;
 		}
 
 		if (pte_dirty(entry))
-			set_page_dirty(page);
+			folio_mark_dirty(folio);
 		entry = pte_mkclean(entry);
 
 		if (pte_write(entry))
@@ -1519,7 +1520,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	 * ptes are necessarily already write-protected. But in either
 	 * case, we need to lock and check page_count is not raised.
 	 */
-	if (write_protect_page(vma, page, &orig_pte) == 0) {
+	if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
 		if (!kpage) {
 			/*
 			 * While we hold page lock, upgrade page from
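One piece of the diff worth unpacking is the reference check, now
folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio): each PTE
mapping contributes one reference, the swap cache contributes one if the
folio sits in it, and the "+ 1" is the reference the KSM scanner itself
holds; any surplus means someone else (e.g. a get_user_pages_fast()
caller doing O_DIRECT) still references the page, so KSM restores the
PTE and backs off. A toy model of that accounting, with illustrative
(non-kernel) names:

#include <stdio.h>

/* Illustrative only: models the reference accounting KSM relies on. */
struct toy_refs {
	int mapcount;      /* references from PTE mappings */
	int in_swapcache;  /* 1 if the swap cache holds a reference */
	int refcount;      /* total references on the folio */
};

/* Mirrors the check in write_protect_page(): the "+ 1" is the
 * reference the scanner itself holds while inspecting the folio.
 * If anything else holds a reference, the counts do not balance
 * and the folio must not be write-protected. */
static int toy_safe_to_write_protect(const struct toy_refs *r)
{
	return r->mapcount + 1 + r->in_swapcache == r->refcount;
}

int main(void)
{
	/* one mapping + the scanner's reference: balanced, safe */
	struct toy_refs quiet  = { .mapcount = 1, .in_swapcache = 0, .refcount = 2 };
	/* same, plus an O_DIRECT-style pin: unbalanced, back off */
	struct toy_refs pinned = { .mapcount = 1, .in_swapcache = 0, .refcount = 3 };

	printf("quiet:  %s\n", toy_safe_to_write_protect(&quiet)  ? "safe" : "busy");
	printf("pinned: %s\n", toy_safe_to_write_protect(&pinned) ? "safe" : "busy");
	return 0;
}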