mm: factor out the numa mapping rebuilding into a new helper
Patch series "support multi-size THP numa balancing", v2. This patchset tries to support mTHP numa balancing. As a simple solution to start, the NUMA balancing algorithm for mTHP will follow the THP strategy as the basic support. Please find details in each patch. This patch (of 2): To support large folio's numa balancing, factor out the numa mapping rebuilding into a new helper as a preparation. Link: https://lkml.kernel.org/r/cover.1712132950.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/cover.1711683069.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/8bc2586bdd8dbbe6d83c09b77b360ec8fcac3736.1711683069.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com> Reviewed-by: "Huang, Ying" <ying.huang@intel.com> Cc: David Hildenbrand <david@redhat.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: Kefeng Wang <wangkefeng.wang@huawei.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Ryan Roberts <ryan.roberts@arm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
68dbcf4899
commit
6b0ed7b3c7
22
mm/memory.c
22
mm/memory.c
@@ -5063,6 +5063,20 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
|
||||
return mpol_misplaced(folio, vmf, addr);
|
||||
}
|
||||
|
||||
/*
 * Rebuild the mapping for a single base page after a NUMA hinting fault.
 *
 * Makes the PTE at vmf->address present again by restoring the VMA's page
 * protections, marking the PTE young (accessed), and optionally restoring
 * write permission.  Caller must hold the PTE lock (vmf->ptl) with vmf->pte
 * mapped — this helper commits directly to *vmf->pte.
 *
 * @vmf:      fault descriptor providing the faulting address and mapped PTE
 * @vma:      the VMA covering the fault, source of vm_page_prot
 * @writable: if true, re-grant write access via pte_mkwrite()
 */
static void numa_rebuild_single_mapping(struct vm_fault *vmf, struct vm_area_struct *vma,
					bool writable)
{
	pte_t pte, old_pte;

	/*
	 * ptep_modify_prot_start/commit bracket the update so the arch can
	 * perform the PTE change atomically; the new value is derived from
	 * old_pte with the VMA's protections re-applied.
	 */
	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
	pte = pte_modify(old_pte, vma->vm_page_prot);
	/* The page was just accessed, so mark the PTE young. */
	pte = pte_mkyoung(pte);
	if (writable)
		pte = pte_mkwrite(pte, vma);
	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
	/* Propagate the updated PTE to the MMU caches (1 page). */
	update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
}
|
||||
|
||||
static vm_fault_t do_numa_page(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
@@ -5168,13 +5182,7 @@ out_map:
|
||||
* Make it present again, depending on how arch implements
|
||||
* non-accessible ptes, some can allow access by kernel mode.
|
||||
*/
|
||||
old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
|
||||
pte = pte_modify(old_pte, vma->vm_page_prot);
|
||||
pte = pte_mkyoung(pte);
|
||||
if (writable)
|
||||
pte = pte_mkwrite(pte, vma);
|
||||
ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
|
||||
update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
|
||||
numa_rebuild_single_mapping(vmf, vma, writable);
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
goto out;
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user