Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:
 "146 patches.

  Subsystems affected by this patch series: kthread, ia64, scripts, ntfs,
  squashfs, ocfs2, vfs, and mm (slab-generic, slab, kmemleak, dax, kasan,
  debug, pagecache, gup, shmem, frontswap, memremap, memcg, selftests,
  pagemap, dma, vmalloc, memory-failure, hugetlb, userfaultfd, vmscan,
  mempolicy, oom-kill, hugetlbfs, migration, thp, ksm, page-poison,
  percpu, rmap, zswap, zram, cleanups, hmm, and damon)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (146 commits)
  mm/damon: hide kernel pointer from tracepoint event
  mm/damon/vaddr: hide kernel pointer from damon_va_three_regions() failure log
  mm/damon/vaddr: use pr_debug() for damon_va_three_regions() failure logging
  mm/damon/dbgfs: remove an unnecessary variable
  mm/damon: move the implementation of damon_insert_region to damon.h
  mm/damon: add access checking for hugetlb pages
  Docs/admin-guide/mm/damon/usage: update for schemes statistics
  mm/damon/dbgfs: support all DAMOS stats
  Docs/admin-guide/mm/damon/reclaim: document statistics parameters
  mm/damon/reclaim: provide reclamation statistics
  mm/damon/schemes: account how many times quota limit has exceeded
  mm/damon/schemes: account scheme actions that successfully applied
  mm/damon: remove a mistakenly added comment for a future feature
  Docs/admin-guide/mm/damon/usage: update for kdamond_pid and (mk|rm)_contexts
  Docs/admin-guide/mm/damon/usage: mention tracepoint at the beginning
  Docs/admin-guide/mm/damon/usage: remove redundant information
  Docs/admin-guide/mm/damon/usage: update for scheme quotas and watermarks
  mm/damon: convert macro functions to static inline functions
  mm/damon: modify damon_rand() macro to static inline function
  mm/damon: move damon_rand() definition into damon.h
  ...
 mm/memory.c | 12 +++++++-----
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -41,6 +41,7 @@
 
 #include <linux/kernel_stat.h>
 #include <linux/mm.h>
+#include <linux/mm_inline.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/coredump.h>
 #include <linux/sched/numa_balancing.h>
@@ -719,8 +720,6 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 	else if (is_writable_device_exclusive_entry(entry))
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 
-	set_pte_at(vma->vm_mm, address, ptep, pte);
-
 	/*
 	 * No need to take a page reference as one was already
 	 * created when the swap entry was made.
@@ -734,6 +733,8 @@ static void restore_exclusive_pte(struct vm_area_struct *vma,
 		 */
 		WARN_ON_ONCE(!PageAnon(page));
 
+	set_pte_at(vma->vm_mm, address, ptep, pte);
+
 	if (vma->vm_flags & VM_LOCKED)
 		mlock_vma_page(page);
 
@@ -3647,7 +3648,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
 	pte = mk_pte(page, vma->vm_page_prot);
-	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
+	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
 		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
 		vmf->flags &= ~FAULT_FLAG_WRITE;
 		ret |= VM_FAULT_WRITE;
@@ -3660,8 +3661,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		pte = pte_mkuffd_wp(pte);
 		pte = pte_wrprotect(pte);
 	}
-	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
-	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
 	vmf->orig_pte = pte;
 
 	/* ksm created a completely new copy */
@@ -3672,6 +3671,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
 	}
 
+	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
+	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
+
 	swap_free(entry);
 	if (mem_cgroup_swap_full(page) ||
 	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
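Taken together, the mm/memory.c hunks do two things that are visible in the diff itself: reuse_swap_page() loses its second argument, and set_pte_at() (plus arch_do_swap_page() in do_swap_page()) moves from before the anon-rmap handling to after it, in both restore_exclusive_pte() and do_swap_page(). A minimal sketch of the resulting ordering in the do_swap_page() tail is below; sketch_install_swapped_in_pte() is a hypothetical helper invented only for illustration, the callees are the ones that appear in the hunks above, and locking, error handling, the soft-dirty/uffd-wp bits, and the ksm/swapcache branch are omitted.

/*
 * Illustration only: a hypothetical, condensed helper mirroring the
 * order of operations shown in the do_swap_page() hunks above.  This is
 * not the actual kernel code; it assumes mm/memory.c's usual includes.
 */
static void sketch_install_swapped_in_pte(struct vm_fault *vmf,
					  struct page *page, int exclusive)
{
	struct vm_area_struct *vma = vmf->vma;
	pte_t pte = mk_pte(page, vma->vm_page_prot);

	/* reuse_swap_page() now takes only the page (the NULL arg is gone). */
	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page))
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);

	vmf->orig_pte = pte;

	/* The page is added to the anon rmap first... */
	do_page_add_anon_rmap(page, vma, vmf->address, exclusive);

	/* ...and only afterwards is the PTE actually installed. */
	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
}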