// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "internal.h"
#include <linux/sched/mm.h>
#include <trace/events/erofs.h>
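
/*
 * Unmap a previously kmapped metadata buffer without dropping the page
 * reference; it can be mapped again by a later erofs_bread() call.
 */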
void erofs_unmap_metabuf(struct erofs_buf *buf)
{
	if (buf->kmap_type == EROFS_KMAP)
		kunmap_local(buf->base);
	buf->base = NULL;
	buf->kmap_type = EROFS_NO_KMAP;
}
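
/*
 * Drop a metadata buffer: unmap it first if still mapped, then release
 * the page reference and reset the buffer for reuse.
 */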
void erofs_put_metabuf(struct erofs_buf *buf)
{
	if (!buf->page)
		return;
	erofs_unmap_metabuf(buf);
	put_page(buf->page);
	buf->page = NULL;
}
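
/*
 * Read the metadata block covering @blkaddr into @buf, reusing the
 * cached page if it already contains that block and (k)mapping it
 * according to the requested @type.
 *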
 * Derive the block size from inode->i_blkbits to stay compatible with
 * the anonymous inodes used in fscache mode.
 */
void *erofs_bread(struct erofs_buf *buf, erofs_blk_t blkaddr,
		  enum erofs_kmap_type type)
{
	struct inode *inode = buf->inode;
	erofs_off_t offset = (erofs_off_t)blkaddr << inode->i_blkbits;
	pgoff_t index = offset >> PAGE_SHIFT;
	struct page *page = buf->page;
	struct folio *folio;
	unsigned int nofs_flag;

	if (!page || page->index != index) {
		erofs_put_metabuf(buf);

		/* metadata reads must not recurse into fs reclaim */
		nofs_flag = memalloc_nofs_save();
		folio = read_cache_folio(inode->i_mapping, index, NULL, NULL);
		memalloc_nofs_restore(nofs_flag);
		if (IS_ERR(folio))
			return folio;

		/* should already be PageUptodate, no need to lock page */
		page = folio_file_page(folio, index);
		buf->page = page;
	}
	if (buf->kmap_type == EROFS_NO_KMAP) {
		if (type == EROFS_KMAP)
			buf->base = kmap_local_page(page);
		buf->kmap_type = type;
	} else if (buf->kmap_type != type) {
		/* a buffer must not be remapped with a different type */
		DBG_BUGON(1);
		return ERR_PTR(-EFAULT);
	}
	if (type == EROFS_NO_KMAP)
		return NULL;
	return buf->base + (offset & ~PAGE_MASK);
}
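
/*
 * Bind the buffer to the address space backing metadata I/O: the
 * fscache pseudo inode in fscache mode, the block device inode
 * otherwise.
 */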
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
{
	if (erofs_is_fscache_mode(sb))
		buf->inode = EROFS_SB(sb)->s_fscache->inode;
	else
		buf->inode = sb->s_bdev->bd_inode;
}
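
/* one-shot helper: bind @buf to @sb's metadata and read block @blkaddr */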
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
{
	erofs_init_metabuf(buf, sb);
	return erofs_bread(buf, blkaddr, type);
}
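
/*
 * Map a logical offset of a flat (uncompressed) inode: blocks are laid
 * out contiguously from vi->raw_blkaddr, except that a tail-packing
 * inode keeps its last partial block inline after the on-disk inode.
 */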
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map)
{
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	struct super_block *sb = inode->i_sb;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = erofs_iblks(inode);
	/* for tail-packing inodes, the last block is stored inline instead */
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;
	if (offset < erofs_pos(sb, lastblk)) {
		map->m_pa = erofs_pos(sb, vi->raw_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sb, lastblk) - offset;
	} else if (tailendpacking) {
		map->m_pa = erofs_iloc(inode) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sb, offset);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sb, map->m_pa) + map->m_plen > sb->s_blocksize) {
			erofs_err(sb, "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(sb, "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		return -EIO;
	}
	return 0;
}
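
/*
 * Resolve the logical extent at map->m_la: flat layouts are computed
 * directly, while chunk-based inodes look up an on-disk block map or
 * chunk index array.
 */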
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_inode_chunk_index *idx;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	void *kaddr;
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, 0);
	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED) {
		err = erofs_map_blocks_flatmode(inode, map);
		goto out;
	}

	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	/* entries start at the first unit-aligned offset past the inode */
	chunknr = map->m_la >> vi->chunkbits;
	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
		    vi->xattr_isize, unit) + unit * chunknr;

	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		err = PTR_ERR(kaddr);
		goto out;
	}
	map->m_la = chunknr << vi->chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
			    round_up(inode->i_size - map->m_la, sb->s_blocksize));

	/* handle block map */
	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sb, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out_unlock;
	}
	/* parse chunk indexes */
	idx = kaddr + erofs_blkoff(sb, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			EROFS_SB(sb)->device_id_mask;
		map->m_pa = erofs_pos(sb, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out_unlock:
	erofs_put_metabuf(&buf);
out:
	if (!err)
		map->m_llen = map->m_plen;
	trace_erofs_map_blocks_exit(inode, map, 0, err);
	return err;
}
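
/*
 * Translate a physical address to its backing device: a directly
 * referenced extra device when m_deviceid is set, otherwise (for
 * multi-device images) the device whose mapped range contains m_pa.
 */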
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
{
	struct erofs_dev_context *devs = EROFS_SB(sb)->devs;
	struct erofs_device_info *dif;
	int id;

	map->m_bdev = sb->s_bdev;
	map->m_daxdev = EROFS_SB(sb)->dax_dev;
	map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
	map->m_fscache = EROFS_SB(sb)->s_fscache;

	if (map->m_deviceid) {
		down_read(&devs->rwsem);
		dif = idr_find(&devs->tree, map->m_deviceid - 1);
		if (!dif) {
			up_read(&devs->rwsem);
			return -ENODEV;
		}
		if (devs->flatdev) {
			map->m_pa += erofs_pos(sb, dif->mapped_blkaddr);
			up_read(&devs->rwsem);
			return 0;
		}
		map->m_bdev = dif->bdev_file ? file_bdev(dif->bdev_file) : NULL;
		map->m_daxdev = dif->dax_dev;
		map->m_dax_part_off = dif->dax_part_off;
		map->m_fscache = dif->fscache;
		up_read(&devs->rwsem);
	} else if (devs->extra_devices && !devs->flatdev) {
		down_read(&devs->rwsem);
		idr_for_each_entry(&devs->tree, dif, id) {
			erofs_off_t startoff, length;

			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sb, dif->mapped_blkaddr);
			length = erofs_pos(sb, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				map->m_bdev = dif->bdev_file ?
					      file_bdev(dif->bdev_file) : NULL;
				map->m_daxdev = dif->dax_dev;
				map->m_dax_part_off = dif->dax_part_off;
				map->m_fscache = dif->fscache;
				break;
			}
		}
		up_read(&devs->rwsem);
	}
	return 0;
}
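
/*
 * iomap_begin: map an (offset, length) request to an iomap extent;
 * tail-packed inline data is returned as IOMAP_INLINE, keeping the
 * metadata buffer mapped (via iomap->private) until erofs_iomap_end().
 */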
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks(inode, &map);
	if (ret < 0)
		return ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};
	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		return ret;

	iomap->offset = map.m_la;
	if (flags & IOMAP_DAX)
		iomap->dax_dev = mdev.m_daxdev;
	else
		iomap->bdev = mdev.m_bdev;
	iomap->length = map.m_llen;
	iomap->flags = 0;
	iomap->private = NULL;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	if (map.m_flags & EROFS_MAP_META) {
		void *ptr;
		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;

		iomap->type = IOMAP_INLINE;
		ptr = erofs_read_metabuf(&buf, sb,
					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
		iomap->private = buf.base;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = mdev.m_pa;
		if (flags & IOMAP_DAX)
			iomap->addr += mdev.m_dax_part_off;
	}
	return 0;
}
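
/*
 * iomap_end: for IOMAP_INLINE extents, rebuild the erofs_buf from the
 * mapped pointer stashed in iomap->private and release it.
 */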
static int erofs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
		ssize_t written, unsigned int flags, struct iomap *iomap)
{
	void *ptr = iomap->private;

	if (ptr) {
		struct erofs_buf buf = {
			.page = kmap_to_page(ptr),
			.base = ptr,
			.kmap_type = EROFS_KMAP,
		};

		DBG_BUGON(iomap->type != IOMAP_INLINE);
		erofs_put_metabuf(&buf);
	} else {
		DBG_BUGON(iomap->type == IOMAP_INLINE);
	}
	return written;
}
static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
	.iomap_end = erofs_iomap_end,
};
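
/* report extents via iomap; compressed inodes use the z_erofs report ops */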
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len)
{
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		return iomap_fiemap(inode, fieinfo, start, len,
				    &z_erofs_iomap_report_ops);
#else
		return -EOPNOTSUPP;
#endif
	}
	return iomap_fiemap(inode, fieinfo, start, len, &erofs_iomap_ops);
}

/*
 * Since there are no write or truncate flows, no inode locking
 * needs to be held at the moment.
 */
static int erofs_read_folio(struct file *file, struct folio *folio)
{
	return iomap_read_folio(folio, &erofs_iomap_ops);
}

static void erofs_readahead(struct readahead_control *rac)
{
	return iomap_readahead(rac, &erofs_iomap_ops);
}

static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	return iomap_bmap(mapping, block, &erofs_iomap_ops);
}
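
/*
 * Read paths in priority order: DAX when enabled, iomap direct I/O for
 * IOCB_DIRECT (after checking logical-block alignment), and buffered
 * page cache reads otherwise.
 */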
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* no need taking (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(inode))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct block_device *bdev = inode->i_sb->s_bdev;
		unsigned int blksize_mask;

		if (bdev)
			blksize_mask = bdev_logical_block_size(bdev) - 1;
		else
			blksize_mask = i_blocksize(inode) - 1;

		/* pos, count and buffer address must all be block-aligned */
		if ((iocb->ki_pos | iov_iter_count(to) |
		     iov_iter_alignment(to)) & blksize_mask)
			return -EINVAL;

		return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
				    NULL, 0, NULL, 0);
	}
	return filemap_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.read_folio = erofs_read_folio,
	.readahead = erofs_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
};
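
/*
 * DAX mmap support: faults are served straight from the backing device
 * through dax_iomap_fault(); shared writable mappings are rejected as
 * the filesystem is read-only.
 */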
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
				       unsigned int order)
{
	return dax_iomap_fault(vmf, order, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, 0);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vm_flags_set(vma, VM_HUGEPAGE);
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.get_unmapped_area = thp_get_unmapped_area,
	.splice_read	= filemap_splice_read,
};