Fixes for 5.16 folios:

 - Fix compilation warnings on csky and sparc
 - Rename multipage folios to large folios
 - Rename AS_THP_SUPPORT and FS_THP_SUPPORT
 - Add functions to zero portions of a folio

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEejHryeLBw/spnjHrDpNsjXcpgj4FAmGem6wACgkQDpNsjXcp
gj7uvwgAjNqDWOVgwYU98daN6nKQQf5Vv35f0bzeKcKcHIOEWZ2+MUeXkI55h8TD
ss5L3O86sPtQmpKUQJJChZC4AhpIPRyjPA0JW6vYqXQd912M331WpGgFFyX5eI+3
OxfKLRULmopeWP1RjWmkWqlhYQHL5OLgAMC4VaBSfDHd1UMRf+F9JNm9qR7GCp9Q
Vb0qcmBMaQYt/K5sWRQyPUACVTF+27RLKAs+Om37NGekv1UqgOPMzi9nAyi9RjCi
rRY6oGupNgC+Y41jzlpaNoL71RPS92H769FBh/Fe4qu55VSPjfcN77qAnVhX5Ykn
4RhzZcEUoqlx9xG9xynk0mmbx2Bf4g==
=kvqM
-----END PGP SIGNATURE-----

Merge tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache

Pull folio fixes from Matthew Wilcox:
 "In the course of preparing the folio changes for iomap for the next
  merge window, we discovered some problems that would be nice to
  address now:

   - Rename multi-page folios to large folios.

     mapping_multi_page_folio_support() is just a little too long, so
     we settled on mapping_large_folio_support(). That meant renaming,
     e.g., folio_test_multi() to folio_test_large(). Rename
     AS_THP_SUPPORT to match.

   - I hadn't included folio wrappers for zero_user_segments(), etc.

     Also, large folio support is now independent of
     CONFIG_TRANSPARENT_HUGEPAGE, so machines with HIGHMEM always need
     to fall back to the out-of-line zero_user_segments(). Remove
     FS_THP_SUPPORT to match.

   - The build bots finally got round to telling me that I missed a
     couple of architectures when adding flush_dcache_folio().
     Christoph suggested that we just add linux/cacheflush.h and not
     rely on asm-generic/cacheflush.h"

* tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache:
  mm: Add functions to zero portions of a folio
  fs: Rename AS_THP_SUPPORT and mapping_thp_support
  fs: Remove FS_THP_SUPPORT
  mm: Remove folio_test_single
  mm: Rename folio_test_multi to folio_test_large
  Add linux/cacheflush.h
commit 79941493ff
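Editor's note: to make the renames concrete before the diffs, here is a minimal caller-side sketch. It is not from this series; the example_* functions are invented, while the folio_*/mapping_* names are the real post-rename 5.16 interfaces.

#include <linux/pagemap.h>

/* was folio_test_multi()/folio_test_single() */
static unsigned int example_folio_order(struct folio *folio)
{
        if (!folio_test_large(folio))
                return 0;
        return folio_order(folio);
}

/* was mapping_thp_support(), keyed off AS_THP_SUPPORT */
static bool example_can_cache_large(struct address_space *mapping)
{
        return mapping_large_folio_support(mapping);
}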
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
 void dma_cache_inv(phys_addr_t start, unsigned long sz);
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
 
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 static inline void flush_kernel_vmap_range(void *addr, int size)
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)         __flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
 #define flush_icache_page(vma, page)    __flush_page_to_ram(page_address(page))
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
                SetPageDcacheDirty(page);
 }
 
-void flush_dcache_folio(struct folio *folio);
-
 #define flush_dcache_mmap_lock(mapping)         do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)       do { } while (0)
 
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, void *src, int len);
 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                             unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 
 #define flush_dcache_mmap_lock(mapping)         xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)       xa_unlock_irq(&mapping->i_pages)
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
                              unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 extern void flush_icache_page(struct vm_area_struct *vma,
@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);
 
 void local_flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_vunmap(start,end)           do { } while (0)
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
 #define flush_dcache_page(page)                 do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }
 
 #define flush_icache_range local_flush_icache_range
 #define flush_cache_page(vma, addr, pfn)        do { } while (0)
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
-       if (sb->s_type->fs_flags & FS_THP_SUPPORT)
-               __set_bit(AS_THP_SUPPORT, &mapping->flags);
        mapping->wb_err = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
 {
 }
 
-static inline void flush_dcache_folio(struct folio *folio) { }
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
 #endif
 
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
-#endif
-
 #ifndef flush_dcache_mmap_lock
include/linux/cacheflush.h (new file, 18 lines)

@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
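Editor's note: a usage sketch for the new header (illustrative only, not from this merge; both example_ functions are made up, while folio_nr_pages(), folio_page() and flush_dcache_folio() are the real 5.16 interfaces). A caller that previously flushed a compound page one page at a time can now include linux/cacheflush.h and flush the whole folio in one call:

#include <linux/cacheflush.h>
#include <linux/mm.h>

/* Old style: flush each constituent page. */
static void example_flush_per_page(struct folio *folio)
{
        long i;

        for (i = 0; i < folio_nr_pages(folio); i++)
                flush_dcache_page(folio_page(folio, i));
}

/* New style: one call, and a no-op on architectures where
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 0. */
static void example_flush_folio(struct folio *folio)
{
        flush_dcache_folio(folio);
}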
@@ -2518,7 +2518,6 @@ struct file_system_type {
 #define FS_USERNS_MOUNT         8       /* Can be mounted by userns root */
 #define FS_DISALLOW_NOTIFY_PERM 16      /* Disable fanotify permission events */
 #define FS_ALLOW_IDMAP          32      /* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT          8192    /* Remove once all fs converted */
 #define FS_RENAME_DOES_D_MOVE   32768   /* FS will handle d_move() during rename() internally. */
        int (*init_fs_context)(struct fs_context *);
        const struct fs_parameter_spec *parameters;
@@ -5,12 +5,11 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/cacheflush.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
 
-#include <asm/cacheflush.h>
-
 #include "highmem-internal.h"
 
 /**
@@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page)
  * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
  * If we pass in a head page, we can zero up to the size of the compound page.
  */
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
 static inline void zero_user_segments(struct page *page,
                unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
@@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page,
        for (i = 0; i < compound_nr(page); i++)
                flush_dcache_page(page + i);
 }
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
 
 static inline void zero_user_segment(struct page *page,
                unsigned start, unsigned end)
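Editor's note: the interface these #ifdef changes keep available everywhere is zero_user_segments(), which zeroes up to two byte ranges with a single map-and-flush; after this change, the out-of-line HIGHMEM version is built even without CONFIG_TRANSPARENT_HUGEPAGE. A trivial illustrative caller (invented for this note):

#include <linux/highmem.h>

/* Zero the first and last 16 bytes of a page in one call. */
static void example_zero_edges(struct page *page)
{
        zero_user_segments(page, 0, 16, PAGE_SIZE - 16, PAGE_SIZE);
}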
@@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
        kunmap_local(addr);
 }
 
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+               size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+       zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+               size_t start, size_t xend)
+{
+       zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+               size_t start, size_t length)
+{
+       zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
 #endif /* _LINUX_HIGHMEM_H */
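Editor's note: a typical consumer of the new wrappers might zero the tail of a folio that extends past EOF. This sketch is not from the series; offset_in_folio() and folio_size() are real 5.16 helpers, example_zero_tail() is made up:

#include <linux/highmem.h>
#include <linux/mm.h>

static void example_zero_tail(struct folio *folio, loff_t isize)
{
        size_t offset = offset_in_folio(folio, isize);

        /* Zero from EOF to the end of the folio, flushing the dcache. */
        folio_zero_segment(folio, offset, folio_size(folio));
}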
@@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page)
 
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 
-/* Whether there are one or multiple pages in a folio */
-static inline bool folio_test_single(struct folio *folio)
-{
-       return !folio_test_head(folio);
-}
-
-static inline bool folio_test_multi(struct folio *folio)
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
 {
        return folio_test_head(folio);
 }
@@ -84,7 +84,7 @@ enum mapping_flags {
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
-       AS_THP_SUPPORT = 6,     /* THPs supported */
+       AS_LARGE_FOLIO_SUPPORT = 6,
 };
 
 /**
@@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
        m->gfp_mask = mask;
 }
 
-static inline bool mapping_thp_support(struct address_space *mapping)
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
 {
-       return test_bit(AS_THP_SUPPORT, &mapping->flags);
+       __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+
+static inline bool mapping_large_folio_support(struct address_space *mapping)
+{
+       return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
 }
 
 static inline int filemap_nr_thps(struct address_space *mapping)
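Editor's note: with these helpers, a filesystem opts in per inode rather than via the removed FS_THP_SUPPORT flag. A minimal sketch for a hypothetical "examplefs", mirroring what the shmem hunk further below does:

#include <linux/fs.h>
#include <linux/pagemap.h>

static struct inode *examplefs_get_inode(struct super_block *sb, umode_t mode)
{
        struct inode *inode = new_inode(sb);

        if (inode) {
                inode->i_mode = mode;
                /* Tell the VFS this file's page cache may use large folios. */
                mapping_set_large_folios(inode->i_mapping);
        }
        return inode;
}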
@@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping)
 static inline void filemap_nr_thps_inc(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-       if (!mapping_thp_support(mapping))
+       if (!mapping_large_folio_support(mapping))
                atomic_inc(&mapping->nr_thps);
 #else
        WARN_ON_ONCE(1);
@@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping)
 static inline void filemap_nr_thps_dec(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-       if (!mapping_thp_support(mapping))
+       if (!mapping_large_folio_support(mapping))
                atomic_dec(&mapping->nr_thps);
 #else
        WARN_ON_ONCE(1);
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
 }
 EXPORT_SYMBOL(kunmap_high);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
                unsigned start2, unsigned end2)
 {
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
        BUG_ON((start1 | start2 | end1 | end2) != 0);
 }
 EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
|
||||
|
||||
VM_BUG_ON(from == to);
|
||||
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
|
||||
VM_BUG_ON(compound && !folio_test_multi(folio));
|
||||
VM_BUG_ON(compound && !folio_test_large(folio));
|
||||
|
||||
/*
|
||||
* Prevent mem_cgroup_migrate() from looking at
|
||||
|
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                INIT_LIST_HEAD(&info->swaplist);
                simple_xattrs_init(&info->xattrs);
                cache_no_acl(inode);
+               mapping_set_large_folios(inode->i_mapping);
 
                switch (mode & S_IFMT) {
                default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
        .parameters     = shmem_fs_parameters,
 #endif
        .kill_sb        = kill_litter_super,
-       .fs_flags       = FS_USERNS_MOUNT | FS_THP_SUPPORT,
+       .fs_flags       = FS_USERNS_MOUNT,
 };
 
 int __init shmem_init(void)