mm/khugepaged: cleanup memcg uncharge for failure path
Explicit memcg uncharging is not needed when the memcg accounting has the
same lifespan as the page/folio.  That is now the case for khugepaged after
Yang & Zach's recent rework, where the hpage is allocated for each collapse
rather than being cached.

Clean up the explicit memcg uncharge in the khugepaged failure path and
leave that to put_page().

Link: https://lkml.kernel.org/r/20230303151218.311015-1-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Suggested-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Zach O'Keefe <zokeefe@google.com>
Reviewed-by: Yang Shi <shy828301@gmail.com>
Cc: David Stevens <stevensd@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9dabf6e137
commit 7cb1d7ef66
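For context, here is a minimal sketch of the pattern the commit relies on; it is not the actual khugepaged code, and the helper name alloc_charged_hpage() is hypothetical. Once mem_cgroup_charge() has succeeded on a freshly allocated folio, the charge shares the folio's lifetime, so dropping the last reference with put_page()/folio_put() releases the charge as well, and failure paths need no explicit mem_cgroup_uncharge().

#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Hedged sketch only: a hypothetical helper, not the real khugepaged code.
 * Once mem_cgroup_charge() succeeds, the charge is tied to the folio's
 * lifetime; freeing the folio via folio_put()/put_page() uncharges it,
 * so failure paths only need to drop the reference.
 */
static struct page *alloc_charged_hpage(struct mm_struct *mm, gfp_t gfp)
{
	struct folio *folio = folio_alloc(gfp, HPAGE_PMD_ORDER);

	if (!folio)
		return NULL;

	if (mem_cgroup_charge(folio, mm, gfp)) {
		/* Charge failed: nothing to uncharge, just free the folio. */
		folio_put(folio);
		return NULL;
	}

	/*
	 * Callers that fail later can simply put_page() the returned page;
	 * the memcg charge goes away with the folio, which is what lets the
	 * commit below drop the explicit mem_cgroup_uncharge() calls.
	 */
	return &folio->page;
}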
@@ -1135,10 +1135,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
 	return result;
 }
@@ -2137,10 +2135,8 @@ xa_unlocked:
 	unlock_page(hpage);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	if (hpage) {
-		mem_cgroup_uncharge(page_folio(hpage));
+	if (hpage)
 		put_page(hpage);
-	}
 
 	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
 	return result;