mm/mremap: convert huge PUD move to separate helper
With TRANSPARENT_HUGEPAGE_PUD enabled, the kernel can find huge PUD entries.
Add a helper to move huge PUD entries on mremap().

This helper will be used by a later patch to optimize mremap of PUD_SIZE-aligned
level-4 PTE-mapped address space.  It also makes sure we support mremap on huge
PUD entries even with CONFIG_HAVE_MOVE_PUD disabled.

[aneesh.kumar@linux.ibm.com: fix build failure with clang-10]
Link: https://lore.kernel.org/lkml/YMuOSnJsL9qkxweY@archlinux-ax161
Link: https://lkml.kernel.org/r/20210619134310.89098-1-aneesh.kumar@linux.ibm.com
Link: https://lkml.kernel.org/r/20210616045239.370802-4-aneesh.kumar@linux.ibm.com
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a9cc9c3456
commit 7d846db7d0

mm/mremap.c (70 lines changed)
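For context before the diff: the PUD-level fast path only applies when both the
source and destination addresses are PUD_SIZE-aligned and at least PUD_SIZE
bytes remain to be moved.  As a rough illustration (not part of this patch), a
userspace mremap() call that satisfies those constraints could look like the
sketch below.  It assumes x86-64, where PUD_SIZE is 1 GiB; whether a huge PUD
entry is actually present still depends on the backing store (e.g. DAX), since
anonymous memory is not PUD-THP mapped.

/* Hypothetical test sketch, not part of this patch. */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define PUD_SZ (1UL << 30) /* PUD_SIZE on x86-64: 1 GiB */

/* Round up to the next PUD_SIZE boundary. */
static char *pud_align(char *p)
{
	return (char *)(((unsigned long)p + PUD_SZ - 1) & ~(PUD_SZ - 1));
}

int main(void)
{
	size_t len = PUD_SZ;

	/* Over-allocate so a PUD_SIZE-aligned start exists inside each area;
	 * mmap() itself only guarantees page alignment. */
	char *src_raw = mmap(NULL, len + PUD_SZ, PROT_READ | PROT_WRITE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *dst_raw = mmap(NULL, len + PUD_SZ, PROT_NONE,
			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src_raw == MAP_FAILED || dst_raw == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	char *src = pud_align(src_raw);
	char *dst = pud_align(dst_raw);

	/* Populate the source so there are page table entries to move. */
	for (size_t i = 0; i < len; i += 4096)
		src[i] = 1;

	/* Old and new addresses are both PUD_SIZE-aligned and the length
	 * covers a whole PUD entry, so move_page_tables() may move the
	 * entry at the PUD level instead of PMD by PMD.  MREMAP_FIXED
	 * unmaps whatever was reserved at dst. */
	char *moved = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (moved == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("moved %p -> %p\n", (void *)src, (void *)moved);
	return 0;
}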
@@ -324,10 +324,61 @@ static inline bool move_normal_pud(struct vm_area_struct *vma,
 }
 #endif
 
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+	spinlock_t *old_ptl, *new_ptl;
+	struct mm_struct *mm = vma->vm_mm;
+	pud_t pud;
+
+	/*
+	 * The destination pud shouldn't be established, free_pgtables()
+	 * should have released it.
+	 */
+	if (WARN_ON_ONCE(!pud_none(*new_pud)))
+		return false;
+
+	/*
+	 * We don't have to worry about the ordering of src and dst
+	 * ptlocks because exclusive mmap_lock prevents deadlock.
+	 */
+	old_ptl = pud_lock(vma->vm_mm, old_pud);
+	new_ptl = pud_lockptr(mm, new_pud);
+	if (new_ptl != old_ptl)
+		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+
+	/* Clear the pud */
+	pud = *old_pud;
+	pud_clear(old_pud);
+
+	VM_BUG_ON(!pud_none(*new_pud));
+
+	/* Set the new pud */
+	/* mark soft_dirty when we add pud level soft dirty support */
+	set_pud_at(mm, new_addr, new_pud, pud);
+	flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
+	if (new_ptl != old_ptl)
+		spin_unlock(new_ptl);
+	spin_unlock(old_ptl);
+
+	return true;
+}
+#else
+static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
+			  unsigned long new_addr, pud_t *old_pud, pud_t *new_pud)
+{
+	WARN_ON_ONCE(1);
+	return false;
+}
+#endif
+
+enum pgt_entry {
+	NORMAL_PMD,
+	HPAGE_PMD,
+	NORMAL_PUD,
+	HPAGE_PUD,
+};
+
 /*
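The locking in move_huge_pud() follows the same shape as move_huge_pmd(): take
the source ptlock, then the destination ptlock with SINGLE_DEPTH_NESTING so
lockdep tolerates acquiring two locks of the same class.  Ordering is a
non-issue because callers hold mmap_lock exclusively, so no second mover can
take the pair in the opposite order.  A generic userspace sketch of that shape
(names are illustrative stand-ins, not kernel API):

#include <pthread.h>

/*
 * Illustrative only: the "lock src, then dst" shape of move_huge_pud(),
 * with generic names instead of kernel primitives.  Safe without a global
 * lock order because the caller holds an exclusive outer lock (the
 * kernel's mmap_lock), so two movers can never interleave and take the
 * pair in opposite orders.
 */
static void move_entry_locked(pthread_mutex_t *old_lock,
			      pthread_mutex_t *new_lock,
			      long *old_entry, long *new_entry)
{
	pthread_mutex_lock(old_lock);
	if (new_lock != old_lock)	/* both entries may share one lock */
		pthread_mutex_lock(new_lock);

	long entry = *old_entry;	/* "pud = *old_pud" */
	*old_entry = 0;			/* "pud_clear(old_pud)" */
	*new_entry = entry;		/* "set_pud_at(...)" */

	if (new_lock != old_lock)
		pthread_mutex_unlock(new_lock);
	pthread_mutex_unlock(old_lock);
}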
@@ -347,6 +398,7 @@ static __always_inline unsigned long get_extent(enum pgt_entry entry,
 		mask = PMD_MASK;
 		size = PMD_SIZE;
 		break;
+	case HPAGE_PUD:
 	case NORMAL_PUD:
 		mask = PUD_MASK;
 		size = PUD_SIZE;
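The new HPAGE_PUD case simply falls through to the PUD mask/size pair;
get_extent() then clamps each step so that neither the old nor the new address
crosses a PUD boundary mid-step, and so the remaining length is respected.  A
standalone paraphrase of that clamping (simplified from mm/mremap.c; the 1 GiB
PUD_SIZE assumes x86-64):

#include <stdio.h>

#define PUD_SIZE (1UL << 30)
#define PUD_MASK (~(PUD_SIZE - 1))

/*
 * Simplified from the kernel's get_extent(): how many bytes can be moved
 * in one step without either address crossing a PUD boundary.
 */
static unsigned long pud_extent(unsigned long old_addr,
				unsigned long old_end,
				unsigned long new_addr)
{
	unsigned long next, extent;

	next = (old_addr + PUD_SIZE) & PUD_MASK;
	extent = next - old_addr;
	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

	next = (new_addr + PUD_SIZE) & PUD_MASK;
	if (extent > next - new_addr)
		extent = next - new_addr;
	return extent;
}

int main(void)
{
	/* Both addresses PUD-aligned with 1 GiB left: a full PUD step. */
	printf("%lx\n", pud_extent(0x40000000UL, 0x80000000UL, 0xc0000000UL));
	return 0;
}

With both addresses aligned this prints 40000000, i.e. a whole PUD entry can
be moved in one iteration; any misalignment shrinks the step accordingly.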
@@ -395,6 +447,12 @@ static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
 			move_huge_pmd(vma, old_addr, new_addr, old_entry,
 				      new_entry);
 		break;
+	case HPAGE_PUD:
+		moved = IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+			move_huge_pud(vma, old_addr, new_addr, old_entry,
+				      new_entry);
+		break;
+
 	default:
 		WARN_ON_ONCE(1);
 		break;
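Note the IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) guard: it expands to a
compile-time 0 or 1, so with THP disabled the move_huge_pud() call is
dead-code-eliminated while the stub version must still parse and type-check;
the clang-10 build fix folded into this patch concerned exactly that stub.
A minimal sketch of the pattern, with stand-in names rather than the kernel's
IS_ENABLED() machinery:

#include <stdio.h>

/* Stand-in for a kernel config option; the kernel's IS_ENABLED() expands
 * CONFIG_* symbols to 0 or 1 in the same spirit. */
#define FEATURE_ENABLED 0

static int do_feature(void)
{
	/* Stub: never called when FEATURE_ENABLED is 0, but it must still
	 * compile, just like the !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
	 * version of move_huge_pud() above. */
	return 0;
}

int main(void)
{
	/* The && short-circuits on a compile-time constant, so the call is
	 * eliminated as dead code while remaining type-checked. */
	int moved = FEATURE_ENABLED && do_feature();

	printf("moved=%d\n", moved);
	return 0;
}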
@@ -414,6 +472,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 	unsigned long extent, old_end;
 	struct mmu_notifier_range range;
 	pmd_t *old_pmd, *new_pmd;
+	pud_t *old_pud, *new_pud;
 
 	old_end = old_addr + len;
 	flush_cache_range(vma, old_addr, old_end);
@@ -429,8 +488,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		 * PUD level if possible.
 		 */
 		extent = get_extent(NORMAL_PUD, old_addr, old_end, new_addr);
-		if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
-			pud_t *old_pud, *new_pud;
 
 		old_pud = get_old_pud(vma->vm_mm, old_addr);
 		if (!old_pud)
@@ -438,6 +495,15 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 		new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
 		if (!new_pud)
 			break;
+		if (pud_trans_huge(*old_pud) || pud_devmap(*old_pud)) {
+			if (extent == HPAGE_PUD_SIZE) {
+				move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
+					       old_pud, new_pud, need_rmap_locks);
+				/* We ignore and continue on error? */
+				continue;
+			}
+		} else if (IS_ENABLED(CONFIG_HAVE_MOVE_PUD) && extent == PUD_SIZE) {
+
 		if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
 				   old_pud, new_pud, need_rmap_locks))
 			continue;