/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_H
#define _MM_SWAP_H

struct mempolicy;

#ifdef CONFIG_SWAP
#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
int sio_pool_init(void);

struct swap_iocb;
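
/*
 * Swap reads may be batched: swap_read_folio() can add the read to the
 * caller-supplied *plug (a struct swap_iocb) instead of issuing it
 * immediately, and the caller submits any batched I/O afterwards with
 * swap_read_unplug().  See mm/page_io.c for the implementation.
 */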
void swap_read_folio(struct folio *folio, bool do_poll,
		struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
void __swap_writepage(struct folio *folio, struct writeback_control *wbc);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
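/*
 * Worked example, assuming 4KiB pages: SWAP_ADDRESS_SPACE_PAGES is
 * 1 << 14 = 16384 slots, i.e. 64MB of swap per address space.  An entry
 * on swap type 1 with swp_offset() 0x12345 is therefore tracked in
 * swapper_spaces[1][0x12345 >> 14], i.e. index 4, the fifth address
 * space of that swap device.
 */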

void show_swap_cache_info(void);
bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				  unsigned long end);
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr);
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index);

struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug);
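
/*
 * __read_swap_cache_async() and swap_cluster_readahead() take the mempolicy
 * and interleave index ("ilx") already resolved by the caller, so folio
 * allocation can honour NUMA policy without building a pseudo-vma;
 * read_swap_cache_async() above derives them from the faulting vma for
 * callers that still have one.
 */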
struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists);
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
		struct mempolicy *mpol, pgoff_t ilx);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
			      struct vm_fault *vmf);
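
/*
 * Returns the SWP_* flags of the swap device backing this folio's swap
 * entry (swp_swap_info(folio->swap)->flags); the !CONFIG_SWAP stub below
 * returns 0.
 */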
static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return swp_swap_info(folio->swap)->flags;
}

#else /* CONFIG_SWAP */
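
/* No-op stubs so callers need not guard these calls with CONFIG_SWAP. */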
struct swap_iocb;
static inline void swap_read_folio(struct folio *folio, bool do_poll,
		struct swap_iocb **plug)
{
}
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}

static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}

static inline void show_swap_cache_info(void)
{
}

static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
{
	return NULL;
}

static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}

static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}

static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	return NULL;
}

static inline
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	return filemap_get_folio(mapping, index);
}

static inline bool add_to_swap(struct folio *folio)
{
	return false;
}

static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}

static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
					gfp_t gfp_mask, void **shadowp)
{
	return -1;
}

static inline void __delete_from_swap_cache(struct folio *folio,
					swp_entry_t entry, void *shadow)
{
}

static inline void delete_from_swap_cache(struct folio *folio)
{
}

static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
						unsigned long end)
{
}

static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}
#endif /* CONFIG_SWAP */
#endif /* _MM_SWAP_H */