mm/hmm: remove pgmap checking for devmap pages
The checking boils down to a racy test of whether the pagemap is still available or not. Instead of checking this, rely entirely on the notifiers: if a pagemap is destroyed, then all pages that belong to it must be removed from the page tables and the notifiers triggered.

Link: https://lore.kernel.org/r/20200327200021.29372-2-jgg@ziepe.ca
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
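For context (not part of the patch): the guarantee described above is what callers of hmm_range_fault() already depend on. The condensed sketch below paraphrases the retry loop documented in Documentation/vm/hmm.rst; the device_pte_lock mutex and the "program the device" step are hypothetical placeholders, and the exact hmm_range_fault() signature varies by kernel version (around the time of this commit it took a flags argument, later kernels drop it; mmap_sem was later renamed mmap_lock).

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(device_pte_lock);	/* hypothetical driver-side lock */

/* Illustrative only: how a caller detects that its snapshot went stale. */
static long driver_fault_range(struct mmu_interval_notifier *ni,
			       struct hmm_range *range)
{
	struct mm_struct *mm = ni->mm;
	long ret;

again:
	range->notifier_seq = mmu_interval_read_begin(ni);

	down_read(&mm->mmap_sem);		/* mmap_read_lock(mm) on newer kernels */
	ret = hmm_range_fault(range, 0);	/* later kernels: hmm_range_fault(range) */
	up_read(&mm->mmap_sem);
	if (ret < 0) {
		if (ret == -EBUSY)
			goto again;	/* invalidated while faulting, just retry */
		return ret;
	}

	mutex_lock(&device_pte_lock);
	if (mmu_interval_read_retry(ni, range->notifier_seq)) {
		/*
		 * The range was invalidated after the snapshot was taken,
		 * e.g. because a pagemap was destroyed and its pages were
		 * unmapped. Throw the stale PFNs away and try again.
		 */
		mutex_unlock(&device_pte_lock);
		goto again;
	}
	/* Program the device page tables from the snapshotted PFNs here. */
	mutex_unlock(&device_pte_lock);
	return 0;
}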
This commit is contained in:
parent 08ddddda66
commit 068354ade5

 mm/hmm.c | 50
@@ -28,7 +28,6 @@
 
 struct hmm_vma_walk {
 	struct hmm_range	*range;
-	struct dev_pagemap	*pgmap;
 	unsigned long		last;
 	unsigned int		flags;
 };
@@ -196,19 +195,8 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
 		return hmm_vma_fault(addr, end, fault, write_fault, walk);
 
 	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
-	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
-		if (pmd_devmap(pmd)) {
-			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-			if (unlikely(!hmm_vma_walk->pgmap))
-				return -EBUSY;
-		}
+	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
-	}
-	if (hmm_vma_walk->pgmap) {
-		put_dev_pagemap(hmm_vma_walk->pgmap);
-		hmm_vma_walk->pgmap = NULL;
-	}
 	hmm_vma_walk->last = end;
 	return 0;
 }
@@ -300,15 +288,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		if (fault || write_fault)
 			goto fault;
 
-	if (pte_devmap(pte)) {
-		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
-					      hmm_vma_walk->pgmap);
-		if (unlikely(!hmm_vma_walk->pgmap)) {
-			pte_unmap(ptep);
-			return -EBUSY;
-		}
-	}
-
 	/*
 	 * Since each architecture defines a struct page for the zero page, just
 	 * fall through and treat it like a normal page.
@@ -328,10 +307,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	return 0;
 
 fault:
-	if (hmm_vma_walk->pgmap) {
-		put_dev_pagemap(hmm_vma_walk->pgmap);
-		hmm_vma_walk->pgmap = NULL;
-	}
 	pte_unmap(ptep);
 	/* Fault any virtual address we were asked to fault */
 	return hmm_vma_fault(addr, end, fault, write_fault, walk);
@@ -418,16 +393,6 @@ again:
 			return r;
 		}
 	}
-	if (hmm_vma_walk->pgmap) {
-		/*
-		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
-		 * so that we can leverage get_dev_pagemap() optimization which
-		 * will not re-take a reference on a pgmap if we already have
-		 * one.
-		 */
-		put_dev_pagemap(hmm_vma_walk->pgmap);
-		hmm_vma_walk->pgmap = NULL;
-	}
 	pte_unmap(ptep - 1);
 
 	hmm_vma_walk->last = addr;
@@ -491,20 +456,9 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
 	}
 
 	pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
-	for (i = 0; i < npages; ++i, ++pfn) {
-		hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
-					      hmm_vma_walk->pgmap);
-		if (unlikely(!hmm_vma_walk->pgmap)) {
-			ret = -EBUSY;
-			goto out_unlock;
-		}
+	for (i = 0; i < npages; ++i, ++pfn)
 		pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
 			  cpu_flags;
-	}
-	if (hmm_vma_walk->pgmap) {
-		put_dev_pagemap(hmm_vma_walk->pgmap);
-		hmm_vma_walk->pgmap = NULL;
-	}
 	hmm_vma_walk->last = end;
 	goto out_unlock;
 }
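As the commit message notes, the walker can drop these checks because destroying a pagemap already removes its pages from the page tables and fires the mmu notifiers. A rough, hypothetical teardown sketch of that ordering (memunmap_pages() is the real teardown entry point; the surrounding function is a placeholder, not taken from this patch):

#include <linux/memremap.h>

/* Hypothetical driver teardown path, for illustration only. */
static void example_devmem_remove(struct dev_pagemap *pgmap)
{
	/*
	 * memunmap_pages() only returns once every page in the pagemap has
	 * been freed, which means each mapping was already torn down and the
	 * mmu notifiers fired. A concurrent hmm_range_fault() caller then
	 * fails mmu_interval_read_retry() and re-snapshots, so no walker can
	 * keep using PFNs from a dead pagemap.
	 */
	memunmap_pages(pgmap);
}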