mm/memcg: Convert mem_cgroup_uncharge() to take a folio
Convert all the callers to call page_folio(). Most of them were already using a head page, but a few of them I can't prove were, so this may actually fix a bug.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
commit bbc6b703b2 (parent c4ed6ebfcb)
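Every caller-side change in the diff below follows one pattern: wrap the struct page pointer in page_folio() at the call site. As a minimal illustrative sketch (hypothetical caller, not taken from the patch):

	/* Hypothetical caller, for illustration only. */
	static void example_release(struct page *page)
	{
		/*
		 * page_folio() resolves any page, head or tail, to the
		 * folio that contains it, so the uncharge always operates
		 * on the head of a compound page. The old page-based
		 * interface misbehaved if handed a tail page, which is why
		 * converting the unproven callers may fix a real bug.
		 */
		mem_cgroup_uncharge(page_folio(page));
	}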
include/linux/memcontrol.h
@@ -722,12 +722,19 @@ int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
 				  gfp_t gfp, swp_entry_t entry);
 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
 
-void __mem_cgroup_uncharge(struct page *page);
-static inline void mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio);
+
+/**
+ * mem_cgroup_uncharge - Uncharge a folio.
+ * @folio: Folio to uncharge.
+ *
+ * Uncharge a folio previously charged with mem_cgroup_charge().
+ */
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 	if (mem_cgroup_disabled())
 		return;
-	__mem_cgroup_uncharge(page);
+	__mem_cgroup_uncharge(folio);
 }
 
 void __mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -1229,7 +1236,7 @@ static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
 {
 }
 
-static inline void mem_cgroup_uncharge(struct page *page)
+static inline void mem_cgroup_uncharge(struct folio *folio)
 {
 }
 
mm/filemap.c
@@ -940,7 +940,7 @@ unlock:
 	if (xas_error(&xas)) {
 		error = xas_error(&xas);
 		if (charged)
-			mem_cgroup_uncharge(page);
+			mem_cgroup_uncharge(page_folio(page));
 		goto error;
 	}
 
mm/khugepaged.c
@@ -1211,7 +1211,7 @@ out_up_write:
 	mmap_write_unlock(mm);
 out_nolock:
 	if (!IS_ERR_OR_NULL(*hpage))
-		mem_cgroup_uncharge(*hpage);
+		mem_cgroup_uncharge(page_folio(*hpage));
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
 }
@@ -1975,7 +1975,7 @@ xa_unlocked:
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
 	if (!IS_ERR_OR_NULL(*hpage))
-		mem_cgroup_uncharge(*hpage);
+		mem_cgroup_uncharge(page_folio(*hpage));
 	/* TODO: tracepoints */
 }
 
mm/memcontrol.c
@@ -6858,22 +6858,16 @@ static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
 	css_put(&memcg->css);
 }
 
-/**
- * __mem_cgroup_uncharge - uncharge a page
- * @page: page to uncharge
- *
- * Uncharge a page previously charged with __mem_cgroup_charge().
- */
-void __mem_cgroup_uncharge(struct page *page)
+void __mem_cgroup_uncharge(struct folio *folio)
 {
 	struct uncharge_gather ug;
 
-	/* Don't touch page->lru of any random page, pre-check: */
-	if (!page_memcg(page))
+	/* Don't touch folio->lru of any random page, pre-check: */
+	if (!folio_memcg(folio))
 		return;
 
 	uncharge_gather_clear(&ug);
-	uncharge_folio(page_folio(page), &ug);
+	uncharge_folio(folio, &ug);
 	uncharge_batch(&ug);
 }
 
mm/memory-failure.c
@@ -762,7 +762,7 @@ static int delete_from_lru_cache(struct page *p)
 	 * Poisoned page might never drop its ref count to 0 so we have
 	 * to uncharge it manually from its memcg.
 	 */
-	mem_cgroup_uncharge(p);
+	mem_cgroup_uncharge(page_folio(p));
 
 	/*
 	 * drop the page count elevated by isolate_lru_page()
mm/memremap.c
@@ -505,7 +505,7 @@ void free_devmap_managed_page(struct page *page)
 
 	__ClearPageWaiters(page);
 
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 
 	/*
 	 * When a device_private page is freed, the page->mapping field
mm/page_alloc.c
@@ -724,7 +724,7 @@ static inline void free_the_page(struct page *page, unsigned int order)
 
 void free_compound_page(struct page *page)
 {
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 	free_the_page(page, compound_order(page));
 }
 
mm/swap.c
@@ -94,7 +94,7 @@ static void __page_cache_release(struct page *page)
 static void __put_single_page(struct page *page)
 {
 	__page_cache_release(page);
-	mem_cgroup_uncharge(page);
+	mem_cgroup_uncharge(page_folio(page));
 	free_unref_page(page, 0);
 }
 
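For completeness, a usage sketch of the converted interface from code that already holds a folio; the charge side is assumed to be the folio-based mem_cgroup_charge() from the parent commit (hypothetical pairing, not part of this patch):

	/* Hypothetical charge/uncharge pairing with the folio API. */
	static int example_lifecycle(struct folio *folio, struct mm_struct *mm)
	{
		int err = mem_cgroup_charge(folio, mm, GFP_KERNEL);
		if (err)
			return err;
		/* ... install and use the folio ... */
		mem_cgroup_uncharge(folio);	/* no page_folio() needed here */
		return 0;
	}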