From 735ecdfaf4e802209caf34b7ac45adf448c54ccc Mon Sep 17 00:00:00 2001
From: Lance Yang
Date: Fri, 14 Jun 2024 09:51:38 +0800
Subject: [PATCH] mm/vmscan: avoid split lazyfree THP during
 shrink_folio_list()

When the user no longer requires the pages, they would use
madvise(MADV_FREE) to mark the pages as lazy free. Subsequently, they
typically would not write to that memory again.

During memory reclaim, if we detect that the large folio and its PMD are
both still marked as clean and there are no unexpected references (such
as GUP), we can just discard the memory lazily, improving the efficiency
of memory reclamation in this case.

On an Intel i5 CPU, reclaiming 1GiB of lazyfree THPs using
mem_cgroup_force_empty() results in the following runtimes in seconds
(shorter is better):

---------------------------------------
|    Old     |    New     |  Change   |
---------------------------------------
|  0.683426  |  0.049197  |  -92.80%  |
---------------------------------------

[ioworker0@gmail.com: minor changes per David]
Link: https://lkml.kernel.org/r/20240622100057.3352-1-ioworker0@gmail.com
Link: https://lkml.kernel.org/r/20240614015138.31461-4-ioworker0@gmail.com
Signed-off-by: Lance Yang
Suggested-by: Zi Yan
Suggested-by: David Hildenbrand
Cc: Bang Li
Cc: Baolin Wang
Cc: Barry Song
Cc: Fangrui Song
Cc: Jeff Xie
Cc: Kefeng Wang
Cc: Matthew Wilcox (Oracle)
Cc: Michal Hocko
Cc: Minchan Kim
Cc: Muchun Song
Cc: Peter Xu
Cc: Ryan Roberts
Cc: SeongJae Park
Cc: Yang Shi
Cc: Yin Fengwei
Cc: Zach O'Keefe
Signed-off-by: Andrew Morton
---
 include/linux/huge_mm.h |  9 ++++++
 mm/huge_memory.c        | 68 +++++++++++++++++++++++++++++++++++++++++
 mm/rmap.c               | 26 ++++++++++++++++----------
 3 files changed, 93 insertions(+), 10 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 9f720b0731c4..212cca384d7e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -430,6 +430,8 @@ static inline bool thp_migration_supported(void)
 
 void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
 			   pmd_t *pmd, bool freeze, struct folio *folio);
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+			   pmd_t *pmdp, struct folio *folio);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -497,6 +499,13 @@ static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
 					 unsigned long address, pmd_t *pmd,
 					 bool freeze, struct folio *folio) {}
 
+static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
+					 unsigned long addr, pmd_t *pmdp,
+					 struct folio *folio)
+{
+	return false;
+}
+
 #define split_huge_pud(__vma, __pmd, __address)	\
 	do { } while (0)
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b247490a0931..18bcd7672a89 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2689,6 +2689,74 @@ static void unmap_folio(struct folio *folio)
 	try_to_unmap_flush();
 }
 
+static bool __discard_anon_folio_pmd_locked(struct vm_area_struct *vma,
+					    unsigned long addr, pmd_t *pmdp,
+					    struct folio *folio)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int ref_count, map_count;
+	pmd_t orig_pmd = *pmdp;
+	struct page *page;
+
+	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd))
+		return false;
+
+	/* Grab the head page before the PMD entry is cleared. */
+	page = pmd_page(orig_pmd);
+	orig_pmd = pmdp_huge_clear_flush(vma, addr, pmdp);
+
+	/*
+	 * Syncing against concurrent GUP-fast:
+	 * - clear PMD; barrier; read refcount
+	 * - inc refcount; barrier; read PMD
+	 */
+	smp_mb();
+
+	ref_count = folio_ref_count(folio);
+	map_count = folio_mapcount(folio);
+
+	/*
+	 * Order reads for folio refcount and dirty flag
+	 * (see comments in __remove_mapping()).
+	 */
+	smp_rmb();
+
+	/*
+	 * If the folio or its PMD is redirtied at this point, or if there
+	 * are unexpected references, we will give up discarding this folio
+	 * and remap it.
+	 *
+	 * The only folio refs must be one from isolation plus the rmap(s).
+	 */
+	if (folio_test_dirty(folio) || pmd_dirty(orig_pmd) ||
+	    ref_count != map_count + 1) {
+		set_pmd_at(mm, addr, pmdp, orig_pmd);
+		return false;
+	}
+
+	folio_remove_rmap_pmd(folio, page, vma);
+	zap_deposited_table(mm, pmdp);
+	add_mm_counter(mm, MM_ANONPAGES, -HPAGE_PMD_NR);
+	if (vma->vm_flags & VM_LOCKED)
+		mlock_drain_local();
+	folio_put(folio);
+
+	return true;
+}
+
+bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
+			   pmd_t *pmdp, struct folio *folio)
+{
+	VM_WARN_ON_FOLIO(!folio_test_pmd_mappable(folio), folio);
+	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_WARN_ON_ONCE(!IS_ALIGNED(addr, HPAGE_PMD_SIZE));
+
+	if (folio_test_anon(folio) && !folio_test_swapbacked(folio))
+		return __discard_anon_folio_pmd_locked(vma, addr, pmdp, folio);
+
+	return false;
+}
+
 static void remap_page(struct folio *folio, unsigned long nr)
 {
 	int i = 0;
diff --git a/mm/rmap.c b/mm/rmap.c
index 42ffdcdfcd27..69cbd7ac2a5c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1678,16 +1678,22 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 			goto walk_abort;
 		}
 
-		if (!pvmw.pte && (flags & TTU_SPLIT_HUGE_PMD)) {
-			/*
-			 * We temporarily have to drop the PTL and start once
-			 * again from that now-PTE-mapped page table.
-			 */
-			split_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
-					      false, folio);
-			flags &= ~TTU_SPLIT_HUGE_PMD;
-			page_vma_mapped_walk_restart(&pvmw);
-			continue;
+		if (!pvmw.pte) {
+			if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd,
+						  folio))
+				goto walk_done;
+
+			if (flags & TTU_SPLIT_HUGE_PMD) {
+				/*
+				 * We temporarily have to drop the PTL and
+				 * restart so we can process the PTE-mapped THP.
+				 */
+				split_huge_pmd_locked(vma, pvmw.address,
+						      pvmw.pmd, false, folio);
+				flags &= ~TTU_SPLIT_HUGE_PMD;
+				page_vma_mapped_walk_restart(&pvmw);
+				continue;
+			}
 		}
 
 		/* Unexpected PMD-mapped THP? */
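
Not part of the patch, for reviewers: the lazyfree pattern that this
reclaim path optimizes looks roughly like the sketch below. It is a
minimal, hypothetical userspace example; the 1GiB size matches the
benchmark above, while the MADV_HUGEPAGE hint and the fill value are
illustrative assumptions.

	/* Illustrative only: set up a clean, PMD-mapped lazyfree THP range. */
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 1UL << 30;	/* 1 GiB, as in the benchmark */
		char *buf;

		buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED)
			return 1;

		madvise(buf, len, MADV_HUGEPAGE);	/* ask for THP backing */
		memset(buf, 0x5a, len);			/* populate the region */

		/*
		 * Mark the range lazy free: it stays mapped, but the now
		 * clean folios may be discarded under memory pressure.
		 */
		madvise(buf, len, MADV_FREE);

		return 0;
	}

After MADV_FREE the THPs remain PMD-mapped but clean; as long as nothing
redirties them, try_to_unmap_one() can now discard each one wholesale via
unmap_huge_pmd_locked() instead of first splitting the PMD and reclaiming
HPAGE_PMD_NR PTE-mapped subpages.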