Four folio-related fixes for 5.19:

 - Don't release a folio while it's still locked
 - Fix a use-after-free after dropping the mmap_lock
 - Fix a memory leak when splitting a page
 - Fix a kernel-doc warning for struct folio

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEejHryeLBw/spnjHrDpNsjXcpgj4FAmKjmV8ACgkQDpNsjXcp
gj7f0Af+OeYLW8nMkqSe92OETzOVPlYCFBPlgE98kmwaD9nFOZlG65w7KggYyUbu
hCU5xgfyxo2rQWalO8CLf/a9w+v02UO9IbjtV3kdePpVxRPx+euYsScyoVOn9O6p
FI6BwEOONUc45rcOGMqDG8BCh75vdeeemu1Z8AYEGs1sIyl2AQYQvpZyZRu2JnBy
AFkjKpwNLHjrC3T1AjOHaJ6CmA2eDJX9z6yuk5yKwMVr7Mkq93PUwyJQb44CK3iD
jGgOrPEL1+JUUFMtSfE0Wzy8wUvMyFq7RDZ39zVooQSz2AcyvcQTbO076dPEKKSZ
tXwvO8J6TDv17s4/ekaoA/+ernwoyQ==
=yYpU
-----END PGP SIGNATURE-----

Merge tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache

Pull folio fixes from Matthew Wilcox:
 "Four folio-related fixes:

  - Don't release a folio while it's still locked

  - Fix a use-after-free after dropping the mmap_lock

  - Fix a memory leak when splitting a page

  - Fix a kernel-doc warning for struct folio"

* tag 'folio-5.19a' of git://git.infradead.org/users/willy/pagecache:
  mm: Add kernel-doc for folio->mlock_count
  mm/huge_memory: Fix xarray node memory leak
  filemap: Cache the value of vm_flags
  filemap: Don't release a locked folio
commit a32e7ea362
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -227,6 +227,7 @@ struct page {
  * struct folio - Represents a contiguous set of bytes.
  * @flags: Identical to the page flags.
  * @lru: Least Recently Used list; tracks how recently this folio was used.
+ * @mlock_count: Number of times this folio has been pinned by mlock().
  * @mapping: The file this page belongs to, or refers to the anon_vma for
  *    anonymous memory.
  * @index: Offset within the file, in units of pages. For anonymous memory,
@@ -255,10 +256,14 @@ struct folio {
 			unsigned long flags;
 			union {
 				struct list_head lru;
+	/* private: avoid cluttering the output */
 				struct {
 					void *__filler;
+	/* public: */
 					unsigned int mlock_count;
+	/* private: */
 				};
+	/* public: */
 			};
 			struct address_space *mapping;
 			pgoff_t index;
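The private:/public: markers added above are what resolve the kernel-doc warning: members inside a private: region are skipped by scripts/kernel-doc, so the anonymous filler needs no @member line while @mlock_count is documented normally. Below is a minimal sketch of the same convention; the struct and its members are invented purely for illustration.

#include <linux/types.h>

/**
 * struct sample - Hypothetical structure used only to illustrate kernel-doc.
 * @id: Public identifier, documented as usual.
 * @users: Reference count, also documented.
 *
 * Members between a "private:" marker and the next "public:" marker are
 * ignored by scripts/kernel-doc, so they need no @member lines and do not
 * trigger "member not described" warnings.
 */
struct sample {
	int id;
	/* private: internal scratch state, hidden from the generated docs */
	unsigned long cookie;
	/* public: */
	int users;
};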
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1508,6 +1508,7 @@ void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
 void xas_init_marks(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_destroy(struct xa_state *);
 void xas_pause(struct xa_state *);
 
 void xas_create_range(struct xa_state *);
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -264,9 +264,10 @@ static void xa_node_free(struct xa_node *node)
  * xas_destroy() - Free any resources allocated during the XArray operation.
  * @xas: XArray operation state.
  *
- * This function is now internal-only.
+ * Most users will not need to call this function; it is called for you
+ * by xas_nomem().
  */
-static void xas_destroy(struct xa_state *xas)
+void xas_destroy(struct xa_state *xas)
 {
 	struct xa_node *next, *node = xas->xa_alloc;
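For context, xas_destroy() was previously reached only through xas_nomem(), at the end of the usual store-and-retry loop. Below is a minimal sketch of that conventional pattern, assuming a caller-supplied xarray and index; the function name is illustrative, not from the patch.

#include <linux/xarray.h>

/* Store @entry at @index, retrying the allocation outside the lock. */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, entry);
		xas_unlock(&xas);
		/*
		 * On -ENOMEM, xas_nomem() allocates a node and returns true
		 * so the store is retried; on any other outcome it frees an
		 * unused preallocation via xas_destroy() and returns false.
		 */
	} while (xas_nomem(&xas, GFP_KERNEL));

	return xas_error(&xas);
}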
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2991,11 +2991,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 	struct address_space *mapping = file->f_mapping;
 	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
 	struct file *fpin = NULL;
+	unsigned long vm_flags = vmf->vma->vm_flags;
 	unsigned int mmap_miss;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 	/* Use the readahead code, even if readahead is disabled */
-	if (vmf->vma->vm_flags & VM_HUGEPAGE) {
+	if (vm_flags & VM_HUGEPAGE) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
 		ra->size = HPAGE_PMD_NR;
@@ -3003,7 +3004,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 		 * Fetch two PMD folios, so we get the chance to actually
 		 * readahead, unless we've been told not to.
 		 */
-		if (!(vmf->vma->vm_flags & VM_RAND_READ))
+		if (!(vm_flags & VM_RAND_READ))
 			ra->size *= 2;
 		ra->async_size = HPAGE_PMD_NR;
 		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
@@ -3012,12 +3013,12 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
 #endif
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vmf->vma->vm_flags & VM_RAND_READ)
+	if (vm_flags & VM_RAND_READ)
 		return fpin;
 	if (!ra->ra_pages)
 		return fpin;
 
-	if (vmf->vma->vm_flags & VM_SEQ_READ) {
+	if (vm_flags & VM_SEQ_READ) {
 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
 		page_cache_sync_ra(&ractl, ra->ra_pages);
 		return fpin;
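The use-after-free fixed here stems from maybe_unlock_mmap_for_io(), which can drop the mmap_lock; once the lock is released, the VMA behind vmf->vma may be unmapped and freed, so its flags must be read before that point. A minimal sketch of the pattern follows, with a hypothetical helper standing in for maybe_unlock_mmap_for_io() and illustrative function names.

#include <linux/mm.h>

/* Hypothetical stand-in for maybe_unlock_mmap_for_io(): drops the mmap_lock. */
static void example_unlock_mmap(struct vm_fault *vmf)
{
	mmap_read_unlock(vmf->vma->vm_mm);
}

static bool example_wants_readahead(struct vm_fault *vmf)
{
	/* Snapshot the flags while the mmap_lock is still held. */
	unsigned long vm_flags = vmf->vma->vm_flags;

	example_unlock_mmap(vmf);

	/* Safe: tests the snapshot, not vmf->vma, which may already be gone. */
	return !(vm_flags & VM_RAND_READ);
}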
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2672,8 +2672,7 @@ out_unlock:
 	if (mapping)
 		i_mmap_unlock_read(mapping);
 out:
-	/* Free any memory we didn't use */
-	xas_nomem(&xas, 0);
+	xas_destroy(&xas);
 	count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED);
 	return ret;
 }
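A short sketch of why this error path wants xas_destroy() rather than xas_nomem(&xas, 0): xas_destroy() unconditionally frees nodes that were preallocated (for example by xas_split_alloc()) but never consumed, whereas xas_nomem() treats a pending -ENOMEM as a request to allocate more and can leave such nodes behind. The function below is an invented example under those assumptions, not the kernel code.

#include <linux/xarray.h>

/*
 * Invented example: preallocate the nodes needed to split a multi-order
 * entry, then bail out.  xas_destroy() frees whatever was preallocated,
 * on both the success and the -ENOMEM path.
 */
static int prepare_split_example(struct xarray *xa, unsigned long index,
				 void *entry, unsigned int order)
{
	XA_STATE(xas, xa, index);
	int ret;

	xas_split_alloc(&xas, entry, order, GFP_KERNEL);
	ret = xas_error(&xas);

	/* ... the caller decides not to (or cannot) perform the split ... */

	xas_destroy(&xas);	/* frees any unused preallocation */
	return ret;
}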
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -164,12 +164,14 @@ static void read_pages(struct readahead_control *rac)
 		while ((folio = readahead_folio(rac)) != NULL) {
 			unsigned long nr = folio_nr_pages(folio);
 
+			folio_get(folio);
 			rac->ra->size -= nr;
 			if (rac->ra->async_size >= nr) {
 				rac->ra->async_size -= nr;
 				filemap_remove_folio(folio);
 			}
 			folio_unlock(folio);
+			folio_put(folio);
 		}
 	} else {
 		while ((folio = readahead_folio(rac)) != NULL)
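The locked-folio fix pins the folio because readahead_folio() returns it without taking a reference of its own (the page cache holds the only one), so filemap_remove_folio() can drop the final reference and the later folio_unlock() would then touch freed memory. The sketch below shows the resulting pattern in isolation; the function name is invented.

#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Invented example of the pattern: pin a locked folio before an operation
 * that may drop the last reference to it, so folio_unlock() still runs on
 * live memory, and release the pin only after unlocking.
 */
static void remove_and_unlock_example(struct folio *folio)
{
	folio_get(folio);		/* keep the folio alive */
	filemap_remove_folio(folio);	/* may drop the page cache's reference */
	folio_unlock(folio);		/* safe: we still hold a reference */
	folio_put(folio);		/* now the folio may really be freed */
}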