mm: simplify follow_invalidate_pte()
The only user (DAX) of the range and pmdpp parameters of follow_invalidate_pte() is gone, so it is safe to remove them and make the function static to simplify the code. This partially reverts the following commits:

  0979639595 ("mm: add follow_pte_pmd()")
  a4d1a88525 ("dax: update to new mmu_notifier semantic")

There is now only one caller of follow_invalidate_pte(), so just fold it into follow_pte() and remove it.

Link: https://lkml.kernel.org/r/20220403053957.10770-7-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Xiyu Yang <xiyuyang19@fudan.edu.cn>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 0e5e64c0b0
parent 06083a0921
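As background for this change (not part of the commit itself): after the fold, follow_pte() is the remaining lookup helper. On success it returns with the PTE mapped and its page-table lock held, and the caller must drop both with pte_unmap_unlock(). A minimal caller sketch, mirroring the in-tree follow_pfn() pattern; the helper name sample_addr_to_pfn() is hypothetical:

	/*
	 * Minimal usage sketch of follow_pte(); sample_addr_to_pfn()
	 * is an illustrative name, not part of this patch.
	 */
	static int sample_addr_to_pfn(struct mm_struct *mm, unsigned long addr,
				      unsigned long *pfn)
	{
		pte_t *ptep;
		spinlock_t *ptl;
		int ret;

		ret = follow_pte(mm, addr, &ptep, &ptl);
		if (ret)
			return ret;		/* -EINVAL: no present PTE */
		*pfn = pte_pfn(*ptep);		/* read while the PTE lock is held */
		pte_unmap_unlock(ptep, ptl);	/* drop the mapping and the lock */
		return 0;
	}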
include/linux/mm.h
@@ -1845,9 +1845,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int
 copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
-int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
-			  struct mmu_notifier_range *range, pte_t **ptepp,
-			  pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
mm/memory.c (101 changed lines)
@@ -4949,73 +4949,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
-			  struct mmu_notifier_range *range, pte_t **ptepp,
-			  pmd_t **pmdpp, spinlock_t **ptlp)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ptep;
-
-	pgd = pgd_offset(mm, address);
-	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-		goto out;
-
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
-		goto out;
-
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-		goto out;
-
-	pmd = pmd_offset(pud, address);
-	VM_BUG_ON(pmd_trans_huge(*pmd));
-
-	if (pmd_huge(*pmd)) {
-		if (!pmdpp)
-			goto out;
-
-		if (range) {
-			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
-						NULL, mm, address & PMD_MASK,
-						(address & PMD_MASK) + PMD_SIZE);
-			mmu_notifier_invalidate_range_start(range);
-		}
-		*ptlp = pmd_lock(mm, pmd);
-		if (pmd_huge(*pmd)) {
-			*pmdpp = pmd;
-			return 0;
-		}
-		spin_unlock(*ptlp);
-		if (range)
-			mmu_notifier_invalidate_range_end(range);
-	}
-
-	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-		goto out;
-
-	if (range) {
-		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
-					address & PAGE_MASK,
-					(address & PAGE_MASK) + PAGE_SIZE);
-		mmu_notifier_invalidate_range_start(range);
-	}
-	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
-	if (!pte_present(*ptep))
-		goto unlock;
-	*ptepp = ptep;
-	return 0;
-unlock:
-	pte_unmap_unlock(ptep, *ptlp);
-	if (range)
-		mmu_notifier_invalidate_range_end(range);
-out:
-	return -EINVAL;
-}
-
 /**
  * follow_pte - look up PTE at a user virtual address
  * @mm: the mm_struct of the target address space
@@ -5040,7 +4973,39 @@ out:
 int follow_pte(struct mm_struct *mm, unsigned long address,
 	       pte_t **ptepp, spinlock_t **ptlp)
 {
-	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep;
+
+	pgd = pgd_offset(mm, address);
+	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+		goto out;
+
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+		goto out;
+
+	pud = pud_offset(p4d, address);
+	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+		goto out;
+
+	pmd = pmd_offset(pud, address);
+	VM_BUG_ON(pmd_trans_huge(*pmd));
+
+	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+		goto out;
+
+	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+	if (!pte_present(*ptep))
+		goto unlock;
+	*ptepp = ptep;
+	return 0;
+unlock:
+	pte_unmap_unlock(ptep, *ptlp);
+out:
+	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(follow_pte);
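For comparison, here is a hedged sketch of the calling convention the removed function imposed on its last (DAX) user, reconstructed from the deleted hunk above; the function old_contract_example() is hypothetical and only illustrates the locking/notifier obligations, not any real caller body:

	/*
	 * Sketch of the old follow_invalidate_pte() contract (illustrative,
	 * not from this patch): on success the caller held either the PMD
	 * lock or the PTE lock, with an mmu_notifier range already started
	 * when one was passed in.
	 */
	static void old_contract_example(struct mm_struct *mm, unsigned long addr)
	{
		struct mmu_notifier_range range;
		spinlock_t *ptl;
		pte_t *ptep = NULL;
		pmd_t *pmdp = NULL;

		if (follow_invalidate_pte(mm, addr, &range, &ptep, &pmdp, &ptl))
			return;				/* nothing mapped here */

		if (pmdp)				/* huge-PMD path */
			spin_unlock(ptl);		/* pmd_lock() was taken inside */
		else					/* PTE path */
			pte_unmap_unlock(ptep, ptl);	/* PTE lock was taken inside */

		mmu_notifier_invalidate_range_end(&range);
	}

With the DAX user gone, neither the pmdpp nor the range half of this contract has any remaining caller, which is why the function can be folded into follow_pte() as the diff shows.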