mm: rmap: use flush_cache_range() to flush cache for hugetlb pages
Currently we use flush_cache_page() to flush the cache for anonymous hugetlb pages when unmapping or migrating a hugetlb page mapping, but flush_cache_page() only handles a PAGE_SIZE range on some architectures (such as arm32 and arc), which can cause cache coherency issues. Change it to flush_cache_range() so that the whole hugetlb page is covered.

Link: https://lkml.kernel.org/r/dc903b378d1e2d26bbbe85409ab9d009631f175c.1651056365.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
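To make the difference concrete, here is a hypothetical, self-contained sketch, not the actual patch (the patch reuses the already-adjusted range.start/range.end of the mmu notifier range instead of recomputing the size). The helper name is made up for illustration; huge_page_size(), hstate_vma(), flush_cache_page(), flush_cache_range() and pte_pfn() are existing kernel interfaces.

/*
 * Illustration only, not the actual patch: contrast the old and the new
 * cache flush for a hugetlb mapping that starts at @address.
 */
static void flush_hugetlb_mapping_cache(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep)
{
        unsigned long hsz = huge_page_size(hstate_vma(vma));

        /* Old: on e.g. arm32/arc this only flushes one PAGE_SIZE page. */
        flush_cache_page(vma, address, pte_pfn(*ptep));

        /* New: cover the whole hugetlb page backing this mapping. */
        flush_cache_range(vma, address, address + hsz);
}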
commit dfc7ab5756
parent 54205e9c54

mm/rmap.c | 90 lines changed
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1525,13 +1525,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                 anon_exclusive = folio_test_anon(folio) &&
                                  PageAnonExclusive(subpage);
 
-                if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
-                        /*
-                         * To call huge_pmd_unshare, i_mmap_rwsem must be
-                         * held in write mode. Caller needs to explicitly
-                         * do this outside rmap routines.
-                         */
-                        VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+                if (folio_test_hugetlb(folio)) {
                         /*
                          * huge_pmd_unshare may unmap an entire PMD page.
                          * There is no way of knowing exactly which PMDs may
@@ -1541,22 +1535,31 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                          */
                         flush_cache_range(vma, range.start, range.end);
 
-                        if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-                                flush_tlb_range(vma, range.start, range.end);
-                                mmu_notifier_invalidate_range(mm, range.start,
-                                                              range.end);
-
+                        if (!folio_test_anon(folio)) {
                                 /*
-                                 * The ref count of the PMD page was dropped
-                                 * which is part of the way map counting
-                                 * is done for shared PMDs. Return 'true'
-                                 * here. When there is no other sharing,
-                                 * huge_pmd_unshare returns false and we will
-                                 * unmap the actual page and drop map count
-                                 * to zero.
+                                 * To call huge_pmd_unshare, i_mmap_rwsem must be
+                                 * held in write mode. Caller needs to explicitly
+                                 * do this outside rmap routines.
                                  */
-                                page_vma_mapped_walk_done(&pvmw);
-                                break;
+                                VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+
+                                if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                                        flush_tlb_range(vma, range.start, range.end);
+                                        mmu_notifier_invalidate_range(mm, range.start,
+                                                                      range.end);
+
+                                        /*
+                                         * The ref count of the PMD page was dropped
+                                         * which is part of the way map counting
+                                         * is done for shared PMDs. Return 'true'
+                                         * here. When there is no other sharing,
+                                         * huge_pmd_unshare returns false and we will
+                                         * unmap the actual page and drop map count
+                                         * to zero.
+                                         */
+                                        page_vma_mapped_walk_done(&pvmw);
+                                        break;
+                                }
                         }
                 } else {
                         flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
@@ -1879,13 +1882,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                 anon_exclusive = folio_test_anon(folio) &&
                                  PageAnonExclusive(subpage);
 
-                if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
-                        /*
-                         * To call huge_pmd_unshare, i_mmap_rwsem must be
-                         * held in write mode. Caller needs to explicitly
-                         * do this outside rmap routines.
-                         */
-                        VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+                if (folio_test_hugetlb(folio)) {
                         /*
                          * huge_pmd_unshare may unmap an entire PMD page.
                          * There is no way of knowing exactly which PMDs may
@@ -1895,22 +1892,31 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                          */
                         flush_cache_range(vma, range.start, range.end);
 
-                        if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
-                                flush_tlb_range(vma, range.start, range.end);
-                                mmu_notifier_invalidate_range(mm, range.start,
-                                                              range.end);
-
+                        if (!folio_test_anon(folio)) {
                                 /*
-                                 * The ref count of the PMD page was dropped
-                                 * which is part of the way map counting
-                                 * is done for shared PMDs. Return 'true'
-                                 * here. When there is no other sharing,
-                                 * huge_pmd_unshare returns false and we will
-                                 * unmap the actual page and drop map count
-                                 * to zero.
+                                 * To call huge_pmd_unshare, i_mmap_rwsem must be
+                                 * held in write mode. Caller needs to explicitly
+                                 * do this outside rmap routines.
                                  */
-                                page_vma_mapped_walk_done(&pvmw);
-                                break;
+                                VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
+
+                                if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
+                                        flush_tlb_range(vma, range.start, range.end);
+                                        mmu_notifier_invalidate_range(mm, range.start,
+                                                                      range.end);
+
+                                        /*
+                                         * The ref count of the PMD page was dropped
+                                         * which is part of the way map counting
+                                         * is done for shared PMDs. Return 'true'
+                                         * here. When there is no other sharing,
+                                         * huge_pmd_unshare returns false and we will
+                                         * unmap the actual page and drop map count
+                                         * to zero.
+                                         */
+                                        page_vma_mapped_walk_done(&pvmw);
+                                        break;
+                                }
                         }
                 } else {
                         flush_cache_page(vma, address, pte_pfn(*pvmw.pte));