device-dax: set mapping prior to vmf_insert_pfn{,_pmd,pud}()
Normally, the @page mapping is set prior to inserting the page into a
page table entry.  Make device-dax adhere to the same ordering, rather
than setting mapping after the PTE is inserted.

The address_space never changes and it is always associated with the
same inode and underlying pages.  So, the page mapping is set once but
cleared when the struct pages are removed/freed (i.e. after
{devm_}memunmap_pages()).

Link: https://lkml.kernel.org/r/20211202204422.26777-10-joao.m.martins@oracle.com
Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 0e7325f03f
parent a0fb038e50
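For reference, dax_set_mapping() is the helper factored out earlier in
this series.  The sketch below is a simplified approximation of its
behavior, not the verbatim kernel code: each page backing the fault is
tied to the file's address_space (and given its index) before the page
table entry is installed, and pages whose mapping is already set are
skipped.

/*
 * Simplified sketch (approximation, not the verbatim helper): associate
 * every page backing the fault with the inode's address_space before the
 * PTE/PMD/PUD entry is installed.  The mapping is only cleared again when
 * the pages are torn down via {devm_}memunmap_pages().
 */
static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
			    unsigned long fault_size)
{
	unsigned long i, nr_pages = fault_size / PAGE_SIZE;
	struct file *filp = vmf->vma->vm_file;
	pgoff_t pgoff = linear_page_index(vmf->vma,
					  ALIGN(vmf->address, fault_size));

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

		/* Mapping never changes once set; only set it the first time. */
		if (page->mapping)
			continue;

		page->mapping = filp->f_mapping;
		page->index = pgoff + i;
	}
}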
@@ -121,6 +121,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
@@ -161,6 +163,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -203,6 +207,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
@@ -217,7 +223,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
 	struct file *filp = vmf->vma->vm_file;
-	unsigned long fault_size;
 	vm_fault_t rc = VM_FAULT_SIGBUS;
 	int id;
 	pfn_t pfn;
@@ -230,23 +235,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 	id = dax_read_lock();
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		fault_size = PAGE_SIZE;
 		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PMD:
-		fault_size = PMD_SIZE;
 		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PUD:
-		fault_size = PUD_SIZE;
 		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
 		break;
 	default:
 		rc = VM_FAULT_SIGBUS;
 	}
 
-	if (rc == VM_FAULT_NOPAGE)
-		dax_set_mapping(vmf, pfn, fault_size);
 	dax_read_unlock(id);
 
 	return rc;