// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
#include "xattr.h"

#include <trace/events/erofs.h>
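
/*
 * Read the on-disk inode of @inode from the metadata buffer @buf and fill
 * in its in-memory fields.  Both on-disk layouts are handled: a compact
 * inode is decoded in place, while an extended inode may cross a block
 * boundary, in which case it is reassembled into a temporary buffer.  On
 * success, the mapped buffer address is returned and *ofs points just past
 * the inode so that callers can locate tail-packed inline data; on failure,
 * an ERR_PTR() is returned.
 */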
static void *erofs_read_inode(struct erofs_buf *buf,
			      struct inode *inode, unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);
	erofs_blk_t blkaddr, nblks = 0;
	void *kaddr;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);
	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
	if (IS_ERR(kaddr)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(kaddr));
		return kaddr;
	}
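
	/*
	 * i_format lives at the same offset in both the compact and the
	 * extended on-disk layouts, so it can safely be probed through the
	 * compact structure before the actual layout is known.
	 */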
	dic = kaddr + *ofs;
	ifmt = le16_to_cpu(dic->i_format);
	if (ifmt & ~EROFS_I_ALL) {
		erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu",
			  ifmt, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}
	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
		/* check if the extended inode crosses the block boundary */
		if (*ofs + vi->inode_isize <= EROFS_BLKSIZ) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = EROFS_BLKSIZ - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
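			/*
			 * The remaining bytes of the extended inode live in
			 * the next block; map it and finish the copy.
			 */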
			kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
						   EROFS_KMAP);
			if (IS_ERR(kaddr)) {
				erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(kaddr));
				kfree(copied);
				return kaddr;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, kaddr, *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_mtime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_mtime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			/* fill chunked inode summary info */
			vi->chunkformat = le16_to_cpu(die->i_u.c.format);
		kfree(copied);
		copied = NULL;
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		else if (vi->datalayout == EROFS_INODE_CHUNK_BASED)
			vi->chunkformat = le16_to_cpu(dic->i_u.c.format);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}
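
	/*
	 * For chunk-based inodes, the chunk size is the block size shifted
	 * left by the low bits of the chunk format, so derive chunkbits only
	 * after rejecting format bits this implementation doesn't recognize.
	 */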
	if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
		if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
			erofs_err(inode->i_sb,
				  "unsupported chunk format %x of nid %llu",
				  vi->chunkformat, vi->nid);
			err = -EOPNOTSUPP;
			goto err_out;
		}
		vi->chunkbits = LOG_BLOCK_SIZE +
			(vi->chunkformat & EROFS_CHUNK_FORMAT_BLKBITS_MASK);
	}
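
	/* EROFS images are immutable, so all timestamps share one value */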
	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

	inode->i_flags &= ~S_DAX;
	if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) &&
	    vi->datalayout == EROFS_INODE_FLAT_PLAIN)
		inode->i_flags |= S_DAX;

	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return kaddr;
bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	erofs_put_metabuf(buf);
	return ERR_PTR(err);
}
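
/*
 * A fast symlink keeps its target string inline in the metadata block,
 * right after the xattr body, so it can be published via inode->i_link
 * without extra I/O; non-inline layouts and longer targets fall back to
 * page_get_link() through erofs_symlink_iops.
 */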
static int erofs_fill_symlink(struct inode *inode, void *kaddr,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;
	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= EROFS_BLKSIZ) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross block boundary */
	if (m_pofs + inode->i_size > EROFS_BLKSIZ) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
	memcpy(lnk, kaddr + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}
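
/*
 * Set up a newly allocated inode: read its on-disk form, then install the
 * inode, file and address space operations that match its file type and
 * data layout.
 */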
static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	void *kaddr;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	kaddr = erofs_read_inode(&buf, inode, &ofs);
	if (IS_ERR(kaddr))
		return PTR_ERR(kaddr);
	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		if (erofs_inode_is_data_compressed(vi->datalayout))
			inode->i_fop = &generic_ro_fops;
		else
			inode->i_fop = &erofs_file_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, kaddr, ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	erofs_put_metabuf(&buf);
	return err;
}
/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long', therefore
 * we should do more on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}
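
/*
 * On 32-bit platforms erofs_inode_hash() may fold the 64-bit nid into
 * i_ino, so iget5_locked() also compares the full nid via the test actor
 * above to disambiguate potential hash collisions.
 */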
static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
			    erofs_iget_set_actor, &nid);
}
struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}
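
/*
 * All EROFS inodes are immutable by design, hence STATX_ATTR_IMMUTABLE is
 * always reported; STATX_ATTR_COMPRESSED is added for compressed inodes.
 */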
int erofs_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}
const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
	.fiemap = erofs_fiemap,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};