// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>
2022-01-02 12:00:14 +08:00
/*
 * Read and decode the on-disk inode of @inode from the metadata area.
 *
 * Handles both the compact (32-byte) and extended (64-byte) on-disk
 * layouts; an extended inode that straddles a block boundary is stitched
 * together in a temporary bounce buffer.  On success, the mapped metadata
 * buffer address is returned (still held in @buf so the caller can go on
 * to read trailing inline data) and *@ofs is set to the byte offset just
 * past the on-disk inode within that block.  On failure, an ERR_PTR() is
 * returned and the metadata buffer has been released.
 */
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = erofs_iloc(inode);
	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(sb, inode_loc);
	*ofs = erofs_blkoff(sb, inode_loc);

	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}

	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	/* reject format bits this implementation doesn't understand */
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses block boundary */
		if (*ofs + vi->inode_isize <= sb->s_blocksize) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			/*
			 * the inode straddles two blocks: copy the first
			 * fragment, remap the next block, then append the
			 * remainder into a bounce buffer.
			 */
			const unsigned int gotten = sb->s_blocksize - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode_set_ctime(inode, le64_to_cpu(die->i_mtime),
				le32_to_cpu(die->i_mtime_nsec));

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec);

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		/* chunk size = block size << low bits of the chunk format */
		vi->chunkbits = sb->s_blocksize_bits +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}

	/* read-only fs: atime and mtime mirror ctime */
	inode_set_mtime_to_ts(inode,
			      inode_set_atime_to_ts(inode, inode_get_ctime(inode)));

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    (vi->datalayout == EROFS_INODE_FLAT_PLAIN ||
	     vi->datalayout == EROFS_INODE_CHUNK_BASED))
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = round_up(inode->i_size, sb->s_blocksize) >> 9;
	else
		inode->i_blocks = nblks << (sb->s_blocksize_bits - 9);
	return kaddr;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}
2022-01-02 12:00:14 +08:00
static int erofs_fill_symlink ( struct inode * inode , void * kaddr ,
2019-09-04 10:08:59 +08:00
unsigned int m_pofs )
2018-07-26 20:21:48 +08:00
{
2019-09-04 10:08:56 +08:00
struct erofs_inode * vi = EROFS_I ( inode ) ;
2023-03-13 21:53:08 +08:00
unsigned int bsz = i_blocksize ( inode ) ;
2019-09-04 10:08:59 +08:00
char * lnk ;
2018-07-26 20:21:48 +08:00
2019-09-04 10:08:59 +08:00
/* if it cannot be handled with fast symlink scheme */
if ( vi - > datalayout ! = EROFS_INODE_FLAT_INLINE | |
2023-03-13 21:53:08 +08:00
inode - > i_size > = bsz | | inode - > i_size < 0 ) {
2019-09-04 10:08:59 +08:00
inode - > i_op = & erofs_symlink_iops ;
2018-07-26 20:21:48 +08:00
return 0 ;
2019-09-04 10:08:59 +08:00
}
2018-07-26 20:21:48 +08:00
2019-09-04 10:09:06 +08:00
lnk = kmalloc ( inode - > i_size + 1 , GFP_KERNEL ) ;
2019-09-04 10:08:59 +08:00
if ( ! lnk )
return - ENOMEM ;
2018-12-05 21:23:13 +08:00
2020-07-30 01:58:01 +08:00
m_pofs + = vi - > xattr_isize ;
2022-01-02 12:00:14 +08:00
/* inline symlink data shouldn't cross block boundary */
2023-03-13 21:53:08 +08:00
if ( m_pofs + inode - > i_size > bsz ) {
2019-09-04 10:08:59 +08:00
kfree ( lnk ) ;
2019-09-04 10:09:09 +08:00
erofs_err ( inode - > i_sb ,
" inline data cross block boundary @ nid %llu " ,
vi - > nid ) ;
2019-09-04 10:08:59 +08:00
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
2022-01-02 12:00:14 +08:00
memcpy ( lnk , kaddr + m_pofs , inode - > i_size ) ;
2019-09-04 10:08:59 +08:00
lnk [ inode - > i_size ] = ' \0 ' ;
2018-07-26 20:21:48 +08:00
2019-09-04 10:08:59 +08:00
inode - > i_link = lnk ;
inode - > i_op = & erofs_fast_symlink_iops ;
2019-06-27 17:46:15 +08:00
return 0 ;
2018-07-26 20:21:48 +08:00
}
2022-09-27 14:36:07 +08:00
/*
 * Read the on-disk inode and wire up the VFS inode: choose i_op/i_fop by
 * file type and the address_space operations by data layout.
 * Returns 0 on success or a negative errno.
 */
static int erofs_fill_inode(struct inode *inode)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		inode_nohighmem(inode);
		break;
	case S_IFLNK:
		/* may turn this into a fast symlink from inline data */
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		/* special inodes need no address space setup */
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
#ifdef CONFIG_EROFS_FS_ZIP
		/*
		 * compressed inodes are only handled when not in fscache
		 * mode and the block size equals the page size
		 */
		if (!erofs_is_fscache_mode(inode->i_sb) &&
		    inode->i_sb->s_blocksize_bits == PAGE_SHIFT) {
			inode->i_mapping->a_ops = &z_erofs_aops;
			err = 0;
			goto out_unlock;
		}
#endif
		err = -EOPNOTSUPP;
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;
	mapping_set_large_folios(inode->i_mapping);
#ifdef CONFIG_EROFS_FS_ONDEMAND
	/* fscache-backed mounts override the raw access a_ops */
	if (erofs_is_fscache_mode(inode->i_sb))
		inode->i_mapping->a_ops = &erofs_fscache_access_aops;
#endif

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}
2018-10-09 22:07:13 +08:00
/*
2023-01-13 14:52:25 +08:00
* ino_t is 32 - bits on 32 - bit arch . We have to squash the 64 - bit value down
* so that it will fit .
2018-10-09 22:07:13 +08:00
*/
2023-01-13 14:52:25 +08:00
static ino_t erofs_squash_ino ( erofs_nid_t nid )
2018-10-09 22:07:13 +08:00
{
2023-01-13 14:52:25 +08:00
ino_t ino = ( ino_t ) nid ;
if ( sizeof ( ino_t ) < sizeof ( erofs_nid_t ) )
ino ^ = nid > > ( sizeof ( erofs_nid_t ) - sizeof ( ino_t ) ) * 8 ;
return ino ;
}
2018-10-09 22:07:13 +08:00
2023-01-13 14:52:25 +08:00
static int erofs_iget5_eq ( struct inode * inode , void * opaque )
{
return EROFS_I ( inode ) - > nid = = * ( erofs_nid_t * ) opaque ;
2018-10-09 22:07:13 +08:00
}
2023-01-13 14:52:25 +08:00
static int erofs_iget5_set ( struct inode * inode , void * opaque )
2018-10-09 22:07:13 +08:00
{
const erofs_nid_t nid = * ( erofs_nid_t * ) opaque ;
2023-01-13 14:52:25 +08:00
inode - > i_ino = erofs_squash_ino ( nid ) ;
EROFS_I ( inode ) - > nid = nid ;
2018-10-09 22:07:13 +08:00
return 0 ;
}
2022-09-27 14:36:07 +08:00
/*
 * Look up (or instantiate) the VFS inode for @nid.  Returns the inode
 * with an extra reference held, or an ERR_PTR() on failure.
 */
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid)
{
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, erofs_squash_ino(nid), erofs_iget5_eq,
			     erofs_iget5_set, &nid);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	/* a cached inode is already fully set up */
	if (!(inode->i_state & I_NEW))
		return inode;

	err = erofs_fill_inode(inode);
	if (err) {
		/* tear down the half-constructed inode */
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
2023-01-13 12:49:12 +01:00
/*
 * ->getattr: report STATX attributes on top of the generic fields.
 * Every EROFS inode is immutable (read-only filesystem); compressed
 * inodes additionally advertise STATX_ATTR_COMPRESSED.
 */
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;
	stat->attributes_mask |= STATX_ATTR_COMPRESSED |
				 STATX_ATTR_IMMUTABLE;

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
2019-01-14 19:40:24 +08:00
/* inode operations shared by regular files, directories-adjacent users
 * and special inodes */
const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};
2019-01-14 19:40:24 +08:00
/* symlinks whose target is read through the page cache */
const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};
2019-01-14 19:40:24 +08:00
/* fast symlinks with the target preloaded into i_link */
const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_inode_acl = erofs_get_acl,
};