Merge tag 'folio-5.17a' of git://git.infradead.org/users/willy/pagecache

Pull more folio updates from Matthew Wilcox:
 "Three small folio patches.

  One bug fix, one patch pulled forward from the patches destined for
  5.18 and then a patch to make use of that functionality"

* tag 'folio-5.17a' of git://git.infradead.org/users/willy/pagecache:
  filemap: Use folio_put_refs() in filemap_free_folio()
  mm: Add folio_put_refs()
  pagevec: Initialise folio_batch->percpu_pvec_drained
Commit b68b10b626 by Linus Torvalds, 2022-01-22 10:43:07 +02:00
3 changed files with 25 additions and 6 deletions

include/linux/mm.h

@@ -1199,6 +1199,26 @@ static inline void folio_put(struct folio *folio)
 		__put_page(&folio->page);
 }
 
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+	if (folio_ref_sub_and_test(folio, refs))
+		__put_page(&folio->page);
+}
+
 static inline void put_page(struct page *page)
 {
 	struct folio *folio = page_folio(page);
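
A minimal usage sketch of the new helper (hypothetical caller, not code
from this series): a path that holds one reference per page of a large
folio, as the page cache does, can drop them all in one atomic
subtract-and-test instead of open-coding the subtraction:

	/* Hypothetical caller: assumes one reference is held per page. */
	static void example_drop_page_refs(struct folio *folio)
	{
		/*
		 * Subtracts folio_nr_pages(folio) from the reference count
		 * and frees the folio if that brought it to zero.
		 */
		folio_put_refs(folio, folio_nr_pages(folio));
	}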

include/linux/pagevec.h

@@ -111,6 +111,7 @@ static_assert(offsetof(struct pagevec, pages) ==
 static inline void folio_batch_init(struct folio_batch *fbatch)
 {
 	fbatch->nr = 0;
+	fbatch->percpu_pvec_drained = false;
 }
 
 static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
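
For context, a hedged sketch of a typical caller (not part of the patch):
a folio_batch normally lives on the stack, so any field that
folio_batch_init() does not set starts out as uninitialised stack
contents. Initialising percpu_pvec_drained here matches what
pagevec_init() already does for a struct pagevec, and keeps
__pagevec_release() from skipping its lru_add_drain() based on garbage.

	/* Hypothetical caller releasing a single folio via a batch. */
	static void example_release_one(struct folio *folio)
	{
		struct folio_batch fbatch;

		folio_batch_init(&fbatch);	/* now also clears percpu_pvec_drained */
		folio_batch_add(&fbatch, folio);
		/* Consults percpu_pvec_drained via pagevec_release(). */
		folio_batch_release(&fbatch);
	}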

mm/filemap.c

@@ -231,17 +231,15 @@ void __filemap_remove_folio(struct folio *folio, void *shadow)
 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
 {
 	void (*freepage)(struct page *);
+	int refs = 1;
 
 	freepage = mapping->a_ops->freepage;
 	if (freepage)
 		freepage(&folio->page);
 
-	if (folio_test_large(folio) && !folio_test_hugetlb(folio)) {
-		folio_ref_sub(folio, folio_nr_pages(folio));
-		VM_BUG_ON_FOLIO(folio_ref_count(folio) <= 0, folio);
-	} else {
-		folio_put(folio);
-	}
+	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
+		refs = folio_nr_pages(folio);
+	folio_put_refs(folio, refs);
 }
 
 /**