filemap: Add filemap_remove_folio and __filemap_remove_folio
Reimplement __delete_from_page_cache() as a wrapper around
__filemap_remove_folio() and delete_from_page_cache() as a wrapper
around filemap_remove_folio(). Remove the EXPORT_SYMBOL as
delete_from_page_cache() was not used by any in-tree modules.
Convert page_cache_free_page() into filemap_free_folio().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
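The pattern, sketched roughly below, keeps legacy page-based call sites working while letting folio-aware code call the new functions directly. This is a hedged illustration, not code from this commit: the example_* functions are made up; only filemap_remove_folio(), delete_from_page_cache() and page_folio() are real interfaces here.

#include <linux/pagemap.h>

/*
 * Old-style call site: unchanged source, now routed through the compat
 * wrapper, which converts the page with page_folio() and calls
 * filemap_remove_folio(). The page must be locked and known to be in
 * the page cache.
 */
static void example_drop_page(struct page *page)
{
	delete_from_page_cache(page);
}

/*
 * Folio-native call site: passes the folio straight through, avoiding
 * the page-to-folio conversion in the wrapper.
 */
static void example_drop_folio(struct folio *folio)
{
	filemap_remove_folio(folio);
}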
commit 452e9e6992
parent a0580c6f9b
include/linux/pagemap.h

@@ -930,8 +930,13 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 		pgoff_t index, gfp_t gfp);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+void filemap_remove_folio(struct folio *folio);
+void delete_from_page_cache(struct page *page);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+static inline void __delete_from_page_cache(struct page *page, void *shadow)
+{
+	__filemap_remove_folio(page_folio(page), shadow);
+}
 void replace_page_cache_page(struct page *old, struct page *new);
 void delete_from_page_cache_batch(struct address_space *mapping,
 		struct pagevec *pvec);
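A quick note on the hunk above: because __delete_from_page_cache() becomes a static inline, existing callers need no source change. The sketch below is hypothetical (example_remove_locked_page() is made up; the locking requirements come from the comment in mm/filemap.c further down):

/*
 * Sketch only: pre-folio code that still passes a struct page keeps
 * compiling; the inline wrapper converts it with page_folio() and
 * forwards to __filemap_remove_folio().
 */
static void example_remove_locked_page(struct page *page)
{
	/* The page must be locked and the i_pages lock held. */
	__delete_from_page_cache(page, NULL);
}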
mm/filemap.c (43 lines changed)
@@ -220,58 +220,55 @@ static void filemap_unaccount_folio(struct address_space *mapping,
  * sure the page is locked and that nobody else uses it - or that usage
  * is safe. The caller must hold the i_pages lock.
  */
-void __delete_from_page_cache(struct page *page, void *shadow)
+void __filemap_remove_folio(struct folio *folio, void *shadow)
 {
-	struct folio *folio = page_folio(page);
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 
 	trace_mm_filemap_delete_from_page_cache(folio);
-
 	filemap_unaccount_folio(mapping, folio);
 	page_cache_delete(mapping, folio, shadow);
 }
 
-static void page_cache_free_page(struct address_space *mapping,
-		struct page *page)
+static void filemap_free_folio(struct address_space *mapping,
+		struct folio *folio)
 {
 	void (*freepage)(struct page *);
 
 	freepage = mapping->a_ops->freepage;
 	if (freepage)
-		freepage(page);
+		freepage(&folio->page);
 
-	if (PageTransHuge(page) && !PageHuge(page)) {
-		page_ref_sub(page, thp_nr_pages(page));
-		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
+	if (folio_test_large(folio) && !folio_test_hugetlb(folio)) {
+		folio_ref_sub(folio, folio_nr_pages(folio));
+		VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
 	} else {
-		put_page(page);
+		folio_put(folio);
 	}
 }
 
 /**
- * delete_from_page_cache - delete page from page cache
- * @page: the page which the kernel is trying to remove from page cache
+ * filemap_remove_folio - Remove folio from page cache.
+ * @folio: The folio.
  *
- * This must be called only on pages that have been verified to be in the page
- * cache and locked. It will never put the page into the free list, the caller
- * has a reference on the page.
+ * This must be called only on folios that are locked and have been
+ * verified to be in the page cache. It will never put the folio into
+ * the free list because the caller has a reference on the page.
  */
-void delete_from_page_cache(struct page *page)
+void filemap_remove_folio(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping = folio->mapping;
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 	spin_lock(&mapping->host->i_lock);
 	xa_lock_irq(&mapping->i_pages);
-	__delete_from_page_cache(page, NULL);
+	__filemap_remove_folio(folio, NULL);
 	xa_unlock_irq(&mapping->i_pages);
 	if (mapping_shrinkable(mapping))
 		inode_add_lru(mapping->host);
 	spin_unlock(&mapping->host->i_lock);
 
-	page_cache_free_page(mapping, page);
+	filemap_free_folio(mapping, folio);
 }
-EXPORT_SYMBOL(delete_from_page_cache);
 
 /*
  * page_cache_delete_batch - delete several pages from page cache
@@ -358,7 +355,7 @@ void delete_from_page_cache_batch(struct address_space *mapping,
 	spin_unlock(&mapping->host->i_lock);
 
 	for (i = 0; i < pagevec_count(pvec); i++)
-		page_cache_free_page(mapping, pvec->pages[i]);
+		filemap_free_folio(mapping, page_folio(pvec->pages[i]));
 }
 
 int filemap_check_errors(struct address_space *mapping)
mm/folio-compat.c

@@ -140,3 +140,8 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
 			mapping_gfp_mask(mapping));
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
+
+void delete_from_page_cache(struct page *page)
+{
+	return filemap_remove_folio(page_folio(page));
+}