// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>

#include <trace/events/erofs.h>
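
/*
 * bio completion handler: mark each page up-to-date (or errored on I/O
 * failure), then unlock it so waiting readers can proceed.
 */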
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}
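
/*
 * Look up a metadata block in the block device's page cache and return
 * the page locked; the caller is responsible for unlock_page()/put_page().
 */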
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}
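
/*
 * Map a logical extent to its physical location for uncompressed (flat)
 * inodes; for tail-packed (FLAT_INLINE) inodes, the last block is served
 * from the inode's metadata area instead of the raw data blocks.
 */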
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}
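
/*
 * dispatch by data layout: compressed inodes go through
 * z_erofs_map_blocks_iter(), everything else is mapped in flat mode.
 */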
int erofs_map_blocks(struct inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
		int err = z_erofs_map_blocks_iter(inode, map, flags);

		if (map->mpage) {
			put_page(map->mpage);
			map->mpage = NULL;
		}
		return err;
	}
	return erofs_map_blocks_flatmode(inode, map, flags);
}
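
/*
 * Read one page of raw (uncompressed) data: keep appending to the current
 * bio while blocks stay contiguous, and submit whenever a gap is hit or
 * the bio fills up. Returns the in-flight bio, NULL, or an ERR_PTR().
 */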
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for readpage case, bio also equals to NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);
			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ;
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* submit in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);

	return err ? ERR_PTR(err) : NULL;
}

/*
 * since there are no write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}
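
/* ->readpages: batch readahead pages into as few contiguous bios as possible */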
static int erofs_raw_access_readpages(struct file *filp,
				      struct address_space *mapping,
				      struct list_head *pages,
				      unsigned int nr_pages)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	gfp_t gfp = readahead_gfp_mask(mapping);
	struct page *page = list_last_entry(pages, struct page, lru);

	trace_erofs_readpages(mapping->host, page, nr_pages, true);

	for (; nr_pages; --nr_pages) {
		page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);

		if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
			bio = erofs_read_raw_page(bio, mapping, page,
						  &last_block, nr_pages, true);

			/* all the page errors are ignored during readahead */
			if (IS_ERR(bio)) {
				pr_err("%s, readahead error at page %lu of nid %llu\n",
				       __func__, page->index,
				       EROFS_I(mapping->host)->nid);

				bio = NULL;
			}
		}

		/* pages could still be locked */
		put_page(page);
	}
	DBG_BUGON(!list_empty(pages));

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
	return 0;
}
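
/* get_block callback for generic_block_bmap() below; 'create' is ignored */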
static int erofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct erofs_map_blocks map = {
		.m_la = iblock << 9,
	};
	int err;

	err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (err)
		return err;

	if (map.m_flags & EROFS_MAP_MAPPED)
		bh->b_blocknr = erofs_blknr(map.m_pa);

	return err;
}
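
/*
 * FIBMAP: the tail block of a tail-packed inode lives in the metadata
 * area, so report it as unmapped rather than as a raw block address.
 */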
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	return generic_block_bmap(mapping, block, erofs_get_block);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readpages = erofs_raw_access_readpages,
	.bmap = erofs_bmap,
};