tmpfs: preliminary minor tidyups
Make a few cleanups in mm/shmem.c, before going on to complicate it.

shmem_alloc_page() will become more complicated: we can't afford to have
that complication duplicated between a CONFIG_NUMA version and a
!CONFIG_NUMA version, so rearrange the #ifdef'ery there to yield a single
shmem_swapin() and a single shmem_alloc_page().

Yes, it's a shame to inflict the horrid pseudo-vma on non-NUMA
configurations, but eliminating it is a larger cleanup: I have an
alloc_pages_mpol() patchset not yet ready - mpol handling is subtle and
bug-prone, and changed yet again since my last version.

Move __SetPageLocked, __SetPageSwapBacked from shmem_getpage_gfp() to
shmem_alloc_page(): that SwapBacked flag will be useful in future, to
help to distinguish different cases appropriately.

And the SGP_DIRTY variant of SGP_CACHE is hard to understand and of
little use (IIRC it dates back to when shmem_getpage() returned the page
unlocked): kill it and do the necessary in shmem_file_read_iter().

But an arm64 build then complained that info may be uninitialized (where
shmem_getpage_gfp() deletes a freshly alloced page beyond eof), and
advancing to an "sgp <= SGP_CACHE" test jogged it back to reality.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andres Lagar-Cavilla <andreslc@google.com>
Cc: Yang Shi <yang.shi@linaro.org>
Cc: Ning Qu <quning@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
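For reference, once SGP_DIRTY is gone the sgp_type enum (taken from the
patch below) reads:

	enum sgp_type {
		SGP_READ,	/* don't exceed i_size, don't allocate page */
		SGP_CACHE,	/* don't exceed i_size, may allocate page */
		SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
		SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
	};

so the new "sgp <= SGP_CACHE" test covers exactly SGP_READ and SGP_CACHE,
the same cases the old "sgp != SGP_WRITE && sgp != SGP_FALLOC" check
allowed.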
parent fa9949da59
commit 75edd345e8
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
 {
 }
 
+static inline struct mempolicy *
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+{
+	return NULL;
+}
+
 #define vma_policy(vma) NULL
 
 static inline int
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -101,7 +101,6 @@ struct shmem_falloc {
 enum sgp_type {
 	SGP_READ,	/* don't exceed i_size, don't allocate page */
 	SGP_CACHE,	/* don't exceed i_size, may allocate page */
-	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
 	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
 	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
 };
@@ -169,7 +168,7 @@ static inline int shmem_reacct_size(unsigned long flags,
 
 /*
  * ... whereas tmpfs objects are accounted incrementally as
- * pages are allocated, in order to allow huge sparse files.
+ * pages are allocated, in order to allow large sparse files.
  * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
  * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
  */
@@ -947,8 +946,7 @@ redirty:
 	return 0;
 }
 
-#ifdef CONFIG_NUMA
-#ifdef CONFIG_TMPFS
+#if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
 	char buffer[64];
@@ -972,7 +970,18 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
 	}
 	return mpol;
 }
-#endif /* CONFIG_TMPFS */
+#else /* !CONFIG_NUMA || !CONFIG_TMPFS */
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
+{
+}
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+	return NULL;
+}
+#endif /* CONFIG_NUMA && CONFIG_TMPFS */
+#ifndef CONFIG_NUMA
+#define vm_policy vm_private_data
+#endif
 
 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 			struct shmem_inode_info *info, pgoff_t index)
@@ -1008,39 +1017,17 @@ static struct page *shmem_alloc_page(gfp_t gfp,
 	pvma.vm_ops = NULL;
 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
 
-	page = alloc_page_vma(gfp, &pvma, 0);
+	page = alloc_pages_vma(gfp, 0, &pvma, 0, numa_node_id(), false);
+	if (page) {
+		__SetPageLocked(page);
+		__SetPageSwapBacked(page);
+	}
 
 	/* Drop reference taken by mpol_shared_policy_lookup() */
 	mpol_cond_put(pvma.vm_policy);
 
 	return page;
 }
-#else /* !CONFIG_NUMA */
-#ifdef CONFIG_TMPFS
-static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
-{
-}
-#endif /* CONFIG_TMPFS */
-
-static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return swapin_readahead(swap, gfp, NULL, 0);
-}
-
-static inline struct page *shmem_alloc_page(gfp_t gfp,
-			struct shmem_inode_info *info, pgoff_t index)
-{
-	return alloc_page(gfp);
-}
-#endif /* CONFIG_NUMA */
-
-#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
-static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
-{
-	return NULL;
-}
-#endif
 
 /*
  * When a page is moved from swapcache to shmem filecache (either by the
@@ -1084,8 +1071,6 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	copy_highpage(newpage, oldpage);
 	flush_dcache_page(newpage);
 
-	__SetPageLocked(newpage);
-	__SetPageSwapBacked(newpage);
 	SetPageUptodate(newpage);
 	set_page_private(newpage, swap_index);
 	SetPageSwapCache(newpage);
@@ -1155,7 +1140,7 @@ repeat:
 		page = NULL;
 	}
 
-	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
+	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		error = -EINVAL;
 		goto unlock;
@@ -1275,9 +1260,6 @@ repeat:
 			error = -ENOMEM;
 			goto decused;
 		}
-
-		__SetPageLocked(page);
-		__SetPageSwapBacked(page);
 		if (sgp == SGP_WRITE)
 			__SetPageReferenced(page);
 
@@ -1321,12 +1303,10 @@ clear:
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 		}
-		if (sgp == SGP_DIRTY)
-			set_page_dirty(page);
 	}
 
 	/* Perhaps the file has been truncated since we checked */
-	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
+	if (sgp <= SGP_CACHE &&
 	    ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
 		if (alloced) {
 			ClearPageDirty(page);
@@ -1633,7 +1613,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
 	 */
 	if (!iter_is_iovec(to))
-		sgp = SGP_DIRTY;
+		sgp = SGP_CACHE;
 
 	index = *ppos >> PAGE_SHIFT;
 	offset = *ppos & ~PAGE_MASK;
@@ -1659,8 +1639,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			error = 0;
 			break;
 		}
-		if (page)
+		if (page) {
+			if (sgp == SGP_CACHE)
+				set_page_dirty(page);
 			unlock_page(page);
+		}
 
 		/*
 		 * We must evaluate after, since reads (unlike writes)