clear_refs: remove clear_refs_private->vma and introduce clear_refs_test_walk()
clear_refs_write() has some prechecks to determine if we really walk over a given vma. Now we have a test_walk() callback to filter vmas, so let's utilize it.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
14eb6fdd42
commit
5c64f52acd
@@ -736,7 +736,6 @@ enum clear_refs_types {
|
||||
};
|
||||
|
||||
struct clear_refs_private {
|
||||
struct vm_area_struct *vma;
|
||||
enum clear_refs_types type;
|
||||
};
|
||||
|
||||
@@ -767,7 +766,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
|
||||
unsigned long end, struct mm_walk *walk)
|
||||
{
|
||||
struct clear_refs_private *cp = walk->private;
|
||||
struct vm_area_struct *vma = cp->vma;
|
||||
struct vm_area_struct *vma = walk->vma;
|
||||
pte_t *pte, ptent;
|
||||
spinlock_t *ptl;
|
||||
struct page *page;
|
||||
@@ -801,6 +800,25 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int clear_refs_test_walk(unsigned long start, unsigned long end,
|
||||
struct mm_walk *walk)
|
||||
{
|
||||
struct clear_refs_private *cp = walk->private;
|
||||
struct vm_area_struct *vma = walk->vma;
|
||||
|
||||
/*
|
||||
* Writing 1 to /proc/pid/clear_refs affects all pages.
|
||||
* Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
|
||||
* Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
|
||||
* Writing 4 to /proc/pid/clear_refs affects all pages.
|
||||
*/
|
||||
if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
|
||||
return 1;
|
||||
if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
@@ -841,6 +859,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
|
||||
};
|
||||
struct mm_walk clear_refs_walk = {
|
||||
.pmd_entry = clear_refs_pte_range,
|
||||
.test_walk = clear_refs_test_walk,
|
||||
.mm = mm,
|
||||
.private = &cp,
|
||||
};
|
||||
@@ -860,28 +879,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
|
||||
}
|
||||
mmu_notifier_invalidate_range_start(mm, 0, -1);
|
||||
}
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
||||
cp.vma = vma;
|
||||
if (is_vm_hugetlb_page(vma))
|
||||
continue;
|
||||
/*
|
||||
* Writing 1 to /proc/pid/clear_refs affects all pages.
|
||||
*
|
||||
* Writing 2 to /proc/pid/clear_refs only affects
|
||||
* Anonymous pages.
|
||||
*
|
||||
* Writing 3 to /proc/pid/clear_refs only affects file
|
||||
* mapped pages.
|
||||
*
|
||||
* Writing 4 to /proc/pid/clear_refs affects all pages.
|
||||
*/
|
||||
if (type == CLEAR_REFS_ANON && vma->vm_file)
|
||||
continue;
|
||||
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
|
||||
continue;
|
||||
walk_page_range(vma->vm_start, vma->vm_end,
|
||||
&clear_refs_walk);
|
||||
}
|
||||
walk_page_range(0, ~0UL, &clear_refs_walk);
|
||||
if (type == CLEAR_REFS_SOFT_DIRTY)
|
||||
mmu_notifier_invalidate_range_end(mm, 0, -1);
|
||||
flush_tlb_mm(mm);
|
||||
|
Loading…
Reference in New Issue
Block a user