// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>
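/*
 * Read one metadata block at @blkaddr through the backing block
 * device's page cache and return it locked; pages returned by
 * read_cache_page_gfp() are already uptodate on success.
 */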
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}
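/*
 * In flat (non-chunk-based) layouts, data blocks are contiguous from
 * vi->raw_blkaddr; with FLAT_INLINE, the tail of the file is packed
 * right after the on-disk inode and xattrs in the metadata area.
 */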
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
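/*
 * Map the logical extent at map->m_la.  Non-chunk-based inodes are
 * delegated to erofs_map_blocks_flatmode(); for chunk-based inodes,
 * each chunk slot is either a 4-byte block address (block map) or a
 * full erofs_inode_chunk_index that also carries a device id.
 */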
static int erofs_map_blocks(struct inode *inode,
			    struct erofs_map_blocks *map, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct page *page;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map, flags);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto out;
	}

	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = page_address(page) + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = page_address(page) + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	unlock_page(page);
	put_page(page);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, flags, 0);
	return err;
}
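/*
 * Resolve the (device id, physical address) pair produced by
 * erofs_map_blocks() to a concrete block/DAX device: either directly
 * via the device id from a chunk index, or by scanning the flattened
 * block ranges of the extra devices.
 */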
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	/* primary device by default */
	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		map->m_bdev = dif->bdev;
		map->m_daxdev = dif->dax_dev;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev;
				map->m_daxdev = dif->dax_dev;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
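/*
 * Translate a file range into an iomap extent: a hole, tail-packed
 * inline data (IOMAP_INLINE, with the locked meta page stashed in
 * iomap->private), or an ordinary mapped extent.
 */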
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(inode->i_sb, &mdev);
	if (ret)
		return ret;

	iomap->bdev = mdev.m_bdev;
	iomap->dax_dev = mdev.m_daxdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		struct page *ipage;

		iomap->type = IOMAP_INLINE;
		ipage = erofs_get_meta_page(inode->i_sb,
					    erofs_blknr(mdev.m_pa));
		if (IS_ERR(ipage))
			return PTR_ERR(ipage);
		iomap->inline_data = page_address(ipage) +
					erofs_blkoff(mdev.m_pa);
		iomap->private = ipage;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
	}
	return 0;
}
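/*
 * Release the meta page pinned by erofs_iomap_begin() for inline
 * extents; nothing to do otherwise.
 */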
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	struct page *ipage = iomap->private;

	if (ipage) {
		DBG_BUGON(iomap->type != IOMAP_INLINE);
		unlock_page(ipage);
		put_page(ipage);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}
static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};
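/*
 * Report extents via the iomap infrastructure; compressed inodes go
 * through z_erofs_iomap_report_ops when CONFIG_EROFS_FS_ZIP is set.
 */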
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}
/*
 * Since there are no write or truncate flows, no inode locking
 * is needed at the moment.
 */
static int erofs_readpage(struct file *file, struct page *page)
{
	return iomap_readpage(page, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}
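/*
 * Direct I/O requires the file position, the byte count and every user
 * buffer to be aligned to the logical block size of the backing device
 * (or to the inode block size when no block device is present).
 */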
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;
	return 0;
}
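/*
 * Read path dispatch: DAX inodes go through dax_iomap_rw(), O_DIRECT
 * requests use iomap_dio_rw() after the alignment check above, and
 * everything else falls back to buffered filemap_read().
 */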
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need taking (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0, 0);
		if (err < 0)
			return err;
	}
	return filemap_read(iocb, to, 0);
}
/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_readpage,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};
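/*
 * DAX mmap support: writable shared mappings are rejected since the
 * filesystem is read-only, and page faults are served directly via
 * dax_iomap_fault() on top of erofs_iomap_ops.
 */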
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif
const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};