hugetlb: convert hugetlb_wp() to use struct vm_fault
hugetlb_wp() can use the struct vm_fault passed in from hugetlb_fault().
This alleviates the stack by consolidating 5 variables into a single
struct.

[vishal.moola@gmail.com: simplify hugetlb_wp() arguments]
  Link: https://lkml.kernel.org/r/ZhQtoFNZBNwBCeXn@fedora
Link: https://lkml.kernel.org/r/20240401202651.31440-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit bd722058e3
parent 7b6ec181de
committed by Andrew Morton
 mm/hugetlb.c | 64
 1 file changed
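For readers skimming the patch, the shape of the conversion is simple: the values hugetlb_wp() used to receive as separate parameters (mm, vma, address, ptep, flags, ptl) are already carried by, or derivable from, struct vm_fault, so the function can take just the fault descriptor plus the pagecache folio. Below is a minimal, self-contained sketch of the same pattern outside the kernel; the names (fault_ctx, handle_wp_old/new) are hypothetical stand-ins for struct vm_fault and hugetlb_wp(), not kernel code.

/*
 * Illustrative sketch only: pass one fault-context struct instead of
 * threading several related values through every helper's argument list.
 */
#include <stdio.h>

struct fault_ctx {                  /* stand-in for struct vm_fault */
	unsigned long address;      /* huge-page-aligned fault address */
	unsigned long real_address; /* unmasked faulting address */
	unsigned int flags;         /* FAULT_FLAG_*-style bits */
};

/* Before: each value is a separate parameter at every call site. */
static int handle_wp_old(unsigned long address, unsigned long real_address,
			 unsigned int flags)
{
	printf("old: addr=%#lx flags=%#x\n", address, flags);
	return 0;
}

/* After: one pointer; the helper reads the fields it needs. */
static int handle_wp_new(const struct fault_ctx *ctx)
{
	printf("new: addr=%#lx flags=%#x\n", ctx->address, ctx->flags);
	return 0;
}

int main(void)
{
	struct fault_ctx ctx = {
		.address = 0x200000,
		.real_address = 0x2010a8,
		.flags = 0x01,
	};

	handle_wp_old(ctx.address, ctx.real_address, ctx.flags);
	return handle_wp_new(&ctx);
}

Besides shrinking hugetlb_wp()'s stack frame, this keeps the call sites in hugetlb_no_page() and hugetlb_fault() down to hugetlb_wp(folio, vmf), as the hunks below show.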
@@ -5918,19 +5918,18 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * cannot race with other handlers or page migration.
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
-static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, pte_t *ptep, unsigned int flags,
-		struct folio *pagecache_folio, spinlock_t *ptl,
+static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 		struct vm_fault *vmf)
 {
-	const bool unshare = flags & FAULT_FLAG_UNSHARE;
-	pte_t pte = huge_ptep_get(ptep);
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
+	pte_t pte = huge_ptep_get(vmf->pte);
 	struct hstate *h = hstate_vma(vma);
 	struct folio *old_folio;
 	struct folio *new_folio;
 	int outside_reserve = 0;
 	vm_fault_t ret = 0;
-	unsigned long haddr = address & huge_page_mask(h);
 	struct mmu_notifier_range range;
 
 	/*
@@ -5953,7 +5952,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Let's take out MAP_SHARED mappings first. */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		set_huge_ptep_writable(vma, haddr, ptep);
+		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 		return 0;
 	}
 
@@ -5972,7 +5971,7 @@ retry_avoidcopy:
 			SetPageAnonExclusive(&old_folio->page);
 		}
 		if (likely(!unshare))
-			set_huge_ptep_writable(vma, haddr, ptep);
+			set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 
 		delayacct_wpcopy_end();
 		return 0;
@@ -5999,8 +5998,8 @@ retry_avoidcopy:
 	 * Drop page table lock as buddy allocator may be called. It will
 	 * be acquired again before returning to the caller, as expected.
 	 */
-	spin_unlock(ptl);
-	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
+	spin_unlock(vmf->ptl);
+	new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
 
 	if (IS_ERR(new_folio)) {
 		/*
@@ -6025,19 +6024,21 @@ retry_avoidcopy:
 			 *
 			 * Reacquire both after unmap operation.
 			 */
-			idx = vma_hugecache_offset(h, vma, haddr);
+			idx = vma_hugecache_offset(h, vma, vmf->address);
 			hash = hugetlb_fault_mutex_hash(mapping, idx);
 			hugetlb_vma_unlock_read(vma);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-			unmap_ref_private(mm, vma, &old_folio->page, haddr);
+			unmap_ref_private(mm, vma, &old_folio->page,
+					vmf->address);
 
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			hugetlb_vma_lock_read(vma);
-			spin_lock(ptl);
-			ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-			if (likely(ptep &&
-				   pte_same(huge_ptep_get(ptep), pte)))
+			spin_lock(vmf->ptl);
+			vmf->pte = hugetlb_walk(vma, vmf->address,
+					huge_page_size(h));
+			if (likely(vmf->pte &&
+				   pte_same(huge_ptep_get(vmf->pte), pte)))
 				goto retry_avoidcopy;
 			/*
 			 * race occurs while re-acquiring page table
@@ -6059,37 +6060,38 @@ retry_avoidcopy:
 	if (unlikely(ret))
 		goto out_release_all;
 
-	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
+	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
 		goto out_release_all;
 	}
 	__folio_mark_uptodate(new_folio);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
-				haddr + huge_page_size(h));
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
+				vmf->address + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
 
 	/*
 	 * Retake the page table lock to check for racing updates
 	 * before the page tables are altered
 	 */
-	spin_lock(ptl);
-	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
+	spin_lock(vmf->ptl);
+	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
+	if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
 		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
 
 		/* Break COW or unshare */
-		huge_ptep_clear_flush(vma, haddr, ptep);
+		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
 		hugetlb_remove_rmap(old_folio);
-		hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
+		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
 		if (huge_pte_uffd_wp(pte))
 			newpte = huge_pte_mkuffd_wp(newpte);
-		set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
+		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
+				huge_page_size(h));
 		folio_set_hugetlb_migratable(new_folio);
 		/* Make the old page be freed below */
 		new_folio = old_folio;
 	}
-	spin_unlock(ptl);
+	spin_unlock(vmf->ptl);
 	mmu_notifier_invalidate_range_end(&range);
 out_release_all:
 	/*
@@ -6097,12 +6099,12 @@ out_release_all:
 	 * unshare)
 	 */
 	if (new_folio != old_folio)
-		restore_reserve_on_error(h, vma, haddr, new_folio);
+		restore_reserve_on_error(h, vma, vmf->address, new_folio);
 	folio_put(new_folio);
 out_release_old:
 	folio_put(old_folio);
 
-	spin_lock(ptl); /* Caller expects lock to be held */
+	spin_lock(vmf->ptl); /* Caller expects lock to be held */
 
 	delayacct_wpcopy_end();
 	return ret;
@@ -6369,8 +6371,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 		hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, vmf->real_address, vmf->pte,
-				 vmf->flags, folio, vmf->ptl, vmf);
+		ret = hugetlb_wp(folio, vmf);
 	}
 
 	spin_unlock(vmf->ptl);
@@ -6583,8 +6584,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(vmf.orig_pte)) {
-			ret = hugetlb_wp(mm, vma, address, vmf.pte, flags,
-					 pagecache_folio, vmf.ptl, &vmf);
+			ret = hugetlb_wp(pagecache_folio, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
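One detail worth noting in the hunks above: the dropped local haddr = address & huge_page_mask(h) is replaced by vmf->address directly, while copy_user_large_folio() now takes vmf->real_address. This presumably works because hugetlb_fault() initializes vmf->address already masked to the huge page boundary (and keeps the unmasked value in real_address), so re-masking inside hugetlb_wp() would be redundant. A tiny standalone illustration of why the masked address needs no further masking; the 2 MiB page size is only an example, not taken from the patch:

/*
 * Illustrative arithmetic only: huge_page_mask() clears the low bits of
 * the address, and masking is idempotent, so an address masked once at
 * fault entry stays huge-page aligned everywhere downstream.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long mask = ~((1UL << 21) - 1); /* 2 MiB huge pages */
	unsigned long real_address = 0x2010a8;   /* unmasked faulting address */
	unsigned long address = real_address & mask;

	/* A fixed point of the mask: (x & mask) & mask == x & mask. */
	assert((address & mask) == address);
	printf("real_address=%#lx address=%#lx\n", real_address, address);
	return 0;
}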