/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to replace
 * mpage_readpages() in the general case, not just for encrypted files.
 * It has some limitations (see below), where it will fall back to
 * block_read_full_page(), but these limitations should only be hit
 * when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
/*
 * Call ext4_decrypt on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	struct ext4_crypto_ctx *ctx =
		container_of(work, struct ext4_crypto_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = ext4_decrypt(ctx, page);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else
			SetPageUptodate(page);
		unlock_page(page);
	}
	ext4_release_crypto_ctx(ctx);
	bio_put(bio);
#else
	BUG();
#endif
}
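
/*
 * A non-NULL bi_private marks the bio as encrypted: ext4_mpage_readpages()
 * stashes the ext4_crypto_ctx there, so mpage_end_io() knows to defer the
 * pages to the decryption workqueue.
 */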
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
	return unlikely(bio->bi_private != NULL);
#else
	return false;
#endif
}
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;
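
	/*
	 * Encrypted pages cannot be marked uptodate here: hand the bio off
	 * to ext4_read_workqueue so that completion_pages() can decrypt the
	 * data and then unlock the pages.
	 */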
	if (ext4_bio_encrypted(bio)) {
		struct ext4_crypto_ctx *ctx = bio->bi_private;

		if (bio->bi_error) {
			ext4_release_crypto_ctx(ctx);
		} else {
			INIT_WORK(&ctx->r.work, completion_pages);
			ctx->r.bio = bio;
			queue_work(ext4_read_workqueue, &ctx->r.work);
			return;
		}
	}
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
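
/*
 * Read either the single page (readpage) or the nr_pages pages on the
 * pages list (readahead) for this mapping, using as few BIOs as possible
 * and falling back to block_read_full_page() in the "confused" cases
 * described at the top of this file.
 */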
int ext4_mpage_readpages(struct address_space *mapping,
			 struct list_head *pages, struct page *page,
			 unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;

	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		prefetchw(&page->flags);
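		/*
		 * In the readahead case, take the next page off the caller's
		 * list and add it to the page cache; if that fails, just
		 * skip the page.
		 */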
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}
		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_CACHE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */
			/* Contiguous blocks? */
			if (page_block && blocks[page_block - 1] != map.m_pblk - 1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + relative_block;
				page_block++;
				block_in_file++;
			}
		}
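
		/*
		 * Zero out the tail of the page if it ends in a hole.  A page
		 * that is nothing but a hole needs no I/O and can be marked
		 * uptodate right away.
		 */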
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_CACHE_SIZE);
			if (first_hole == 0) {
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
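
		/*
		 * If cleancache can supply the whole page it is already
		 * uptodate; the "confused" path is taken here only to unlock
		 * the page without issuing any I/O.
		 */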
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}
		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1)) {
		submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
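
		/*
		 * Allocate a new bio.  For an encrypted regular file a crypto
		 * context is set up first and stashed in bi_private, which is
		 * how mpage_end_io() recognizes the bio as encrypted.
		 */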
		if (bio == NULL) {
			struct ext4_crypto_ctx *ctx = NULL;

			if (ext4_encrypted_inode(inode) &&
			    S_ISREG(inode->i_mode)) {
				ctx = ext4_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;
			}
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					ext4_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = ctx;
		}
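
		/*
		 * Add the mapped part of the page (up to the first hole) to
		 * the bio; if it does not all fit, submit the bio and retry
		 * with a freshly allocated one.
		 */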
		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(READ, bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
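
		/*
		 * "confused": one of the unusual cases listed at the top of
		 * this file was hit.  Submit any pending bio and let
		 * block_read_full_page() handle this page instead.
		 */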
	confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}