/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_SWAP_H
#define _MM_SWAP_H

#ifdef CONFIG_SWAP
#include <linux/blk_types.h> /* for bio_end_io_t */

/* linux/mm/page_io.c */
int sio_pool_init(void);
struct swap_iocb;
int swap_readpage(struct page *page, bool do_poll,
		  struct swap_iocb **plug);
void __swap_read_unplug(struct swap_iocb *plug);
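/*
 * swap_read_unplug() submits any reads that swap_readpage() queued on
 * @plug; the common case of a NULL plug is a no-op.
 */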
static inline void swap_read_unplug(struct swap_iocb *plug)
{
	if (unlikely(plug))
		__swap_read_unplug(plug);
}
void swap_write_unplug(struct swap_iocb *sio);
int swap_writepage(struct page *page, struct writeback_control *wbc);
void end_swap_bio_write(struct bio *bio);
int __swap_writepage(struct page *page, struct writeback_control *wbc,
		     bio_end_io_t end_write_func);

/* linux/mm/swap_state.c */
/* One swap address space for each 64M swap space */
#define SWAP_ADDRESS_SPACE_SHIFT	14
#define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
extern struct address_space *swapper_spaces[];
#define swap_address_space(entry)			    \
	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
		>> SWAP_ADDRESS_SPACE_SHIFT])
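/*
 * Example, assuming 4KiB pages: 1 << 14 = 16384 pages = 64MiB, so
 * swap_address_space() picks one address_space per 64MiB of swap,
 * indexed by swp_offset(entry) >> SWAP_ADDRESS_SPACE_SHIFT.
 */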
void show_swap_cache_info(void);
bool add_to_swap(struct folio *folio);
void *get_shadow_from_swap_cache(swp_entry_t entry);
int add_to_swap_cache(struct page *page, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
void delete_from_swap_cache(struct folio *folio);
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				  unsigned long end);
struct page *lookup_swap_cache(swp_entry_t entry,
			       struct vm_area_struct *vma,
			       unsigned long addr);
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index);
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr,
				   bool do_poll,
				   struct swap_iocb **plug);
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				     struct vm_area_struct *vma,
				     unsigned long addr,
				     bool *new_page_allocated);
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
				    struct vm_fault *vmf);
struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
			      struct vm_fault *vmf);
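
/*
 * Return the SWP_* flags of the swap device backing this folio's swap
 * entry.
 */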
static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return page_swap_info(&folio->page)->flags;
}
#else /* CONFIG_SWAP */
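/* Stub implementations so callers need no #ifdef CONFIG_SWAP. */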
struct swap_iocb;
static inline int swap_readpage(struct page *page, bool do_poll,
				struct swap_iocb **plug)
{
	return 0;
}
static inline void swap_write_unplug(struct swap_iocb *sio)
{
}
static inline struct address_space *swap_address_space(swp_entry_t entry)
{
	return NULL;
}
static inline void show_swap_cache_info(void)
{
}
static inline struct page *swap_cluster_readahead(swp_entry_t entry,
			gfp_t gfp_mask, struct vm_fault *vmf)
{
	return NULL;
}
static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
			struct vm_fault *vmf)
{
	return NULL;
}
static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
{
	return 0;
}
static inline struct page *lookup_swap_cache(swp_entry_t swp,
					     struct vm_area_struct *vma,
					     unsigned long addr)
{
	return NULL;
}
static inline
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	return find_get_page(mapping, index);
}
static inline bool add_to_swap(struct folio *folio)
{
	return false;
}
static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	return NULL;
}
static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
				    gfp_t gfp_mask, void **shadowp)
{
	return -1;
}
static inline void __delete_from_swap_cache(struct folio *folio,
					swp_entry_t entry, void *shadow)
{
}
static inline void delete_from_swap_cache(struct folio *folio)
{
}
static inline void clear_shadow_from_swap_cache(int type, unsigned long begin,
						unsigned long end)
{
}
static inline unsigned int folio_swap_flags(struct folio *folio)
{
	return 0;
}
#endif /* CONFIG_SWAP */
#endif /* _MM_SWAP_H */