mm: simplify follow_pte{,pmd}
commit ff5c19ed4b087073cea38ff0edc80c23d7256943 upstream.

Merge __follow_pte_pmd, follow_pte_pmd and follow_pte into a single
follow_pte function and just pass two additional NULL arguments for the
two previous follow_pte callers.

[sfr@canb.auug.org.au: merge fix for "s390/pci: remove races against pte updates"]
  Link: https://lkml.kernel.org/r/20201111221254.7f6a3658@canb.auug.org.au
Link: https://lkml.kernel.org/r/20201029101432.47011-3-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8aeef9c9ac
commit 78c7b24257
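For the callers that only needed a PTE, the conversion is mechanical, as the follow_pfn() and follow_phys() hunks below show. A minimal before/after sketch of such a call site:

	/* Before: the old static pte-only helper took four arguments. */
	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);

	/* After: the single exported follow_pte() also takes the notifier
	 * range and a pmd pointer; callers that need neither pass NULL.
	 */
	ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);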
fs/dax.c: 9 changed lines
@@ -794,12 +794,11 @@ static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
 		address = pgoff_address(index, vma);
 
 		/*
-		 * Note because we provide range to follow_pte_pmd it will
-		 * call mmu_notifier_invalidate_range_start() on our behalf
-		 * before taking any lock.
+		 * Note because we provide range to follow_pte it will call
+		 * mmu_notifier_invalidate_range_start() on our behalf before
+		 * taking any lock.
 		 */
-		if (follow_pte_pmd(vma->vm_mm, address, &range,
-				   &ptep, &pmdp, &ptl))
+		if (follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl))
 			continue;
 
 		/*
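The comment kept in this hunk relies on follow_pte() issuing mmu_notifier_invalidate_range_start() itself whenever a range is supplied. A rough sketch of that calling pattern (locals and error handling elided; not the full dax_entry_mkclean() body):

	struct mmu_notifier_range range;
	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	if (!follow_pte(vma->vm_mm, address, &range, &ptep, &pmdp, &ptl)) {
		/* follow_pte() has already called
		 * mmu_notifier_invalidate_range_start() and taken ptl, so the
		 * PTE (or the huge PMD, if *pmdp was set) can be examined
		 * safely here.
		 */
		if (pmdp)
			spin_unlock(ptl);
		else
			pte_unmap_unlock(ptep, ptl);
		mmu_notifier_invalidate_range_end(&range);
	}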
include/linux/mm.h: 6 changed lines
@@ -1466,9 +1466,9 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-		   struct mmu_notifier_range *range,
-		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
+int follow_pte(struct mm_struct *mm, unsigned long address,
+	       struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+	       spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
mm/memory.c: 35 changed lines
@@ -4222,9 +4222,9 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-			    struct mmu_notifier_range *range,
-			    pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
+int follow_pte(struct mm_struct *mm, unsigned long address,
+	       struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp,
+	       spinlock_t **ptlp)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
@@ -4289,31 +4289,6 @@ out:
 	return -EINVAL;
 }
 
-static inline int follow_pte(struct mm_struct *mm, unsigned long address,
-			     pte_t **ptepp, spinlock_t **ptlp)
-{
-	int res;
-
-	/* (void) is needed to make gcc happy */
-	(void) __cond_lock(*ptlp,
-			   !(res = __follow_pte_pmd(mm, address, NULL,
-						    ptepp, NULL, ptlp)));
-	return res;
-}
-
-int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
-		   struct mmu_notifier_range *range,
-		   pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
-{
-	int res;
-
-	/* (void) is needed to make gcc happy */
-	(void) __cond_lock(*ptlp,
-			   !(res = __follow_pte_pmd(mm, address, range,
-						    ptepp, pmdpp, ptlp)));
-	return res;
-}
-
 /**
  * follow_pfn - look up PFN at a user virtual address
  * @vma: memory mapping
@@ -4334,7 +4309,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 		return ret;
 
-	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+	ret = follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl);
 	if (ret)
 		return ret;
 	*pfn = pte_pfn(*ptep);
@@ -4355,7 +4330,7 @@ int follow_phys(struct vm_area_struct *vma,
 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
 		goto out;
 
-	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+	if (follow_pte(vma->vm_mm, address, NULL, &ptep, NULL, &ptl))
 		goto out;
 	pte = *ptep;
 	if ((flags & FOLL_WRITE) && !pte_write(pte))