// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "xattr.h"

#include <trace/events/erofs.h>

/*
 * if inode is successfully read, return its inode page (or sometimes
 * the inode payload page if it's an extended inode) in order to fill
 * inline data if possible.
 */
static struct page *erofs_read_inode(struct inode *inode,
				     unsigned int *ofs)
{
	struct super_block *sb = inode->i_sb;
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_inode *vi = EROFS_I(inode);
	const erofs_off_t inode_loc = iloc(sbi, vi->nid);
	erofs_blk_t blkaddr, nblks = 0;
	struct page *page;
	struct erofs_inode_compact *dic;
	struct erofs_inode_extended *die, *copied = NULL;
	unsigned int ifmt;
	int err;

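	/*
	 * An EROFS nid indexes fixed-size 32-byte inode slots starting at
	 * the metadata block area, so iloc() yields the on-disk byte offset
	 * of this inode; split it into a block address plus an offset
	 * within that block (and hence within the meta page).
	 */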
	blkaddr = erofs_blknr(inode_loc);
	*ofs = erofs_blkoff(inode_loc);

	erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
		  __func__, vi->nid, *ofs, blkaddr);

	page = erofs_get_meta_page(sb, blkaddr);
	if (IS_ERR(page)) {
		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
			  vi->nid, PTR_ERR(page));
		return page;
	}

	dic = page_address(page) + *ofs;
	ifmt = le16_to_cpu(dic->i_format);

	vi->datalayout = erofs_inode_datalayout(ifmt);
	if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
		erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
			  vi->datalayout, vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}
	switch (erofs_inode_version(ifmt)) {
	case EROFS_INODE_LAYOUT_EXTENDED:
		vi->inode_isize = sizeof(struct erofs_inode_extended);
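		/*
		 * Inode slots are 32 bytes wide while an extended inode is
		 * 64 bytes, so an extended inode may straddle a block (page)
		 * boundary; in that case both halves are copied into a
		 * temporary buffer below before being parsed.
		 */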
		/* check if the inode crosses the page boundary */
		if (*ofs + vi->inode_isize <= PAGE_SIZE) {
			*ofs += vi->inode_isize;
			die = (struct erofs_inode_extended *)dic;
		} else {
			const unsigned int gotten = PAGE_SIZE - *ofs;

			copied = kmalloc(vi->inode_isize, GFP_NOFS);
			if (!copied) {
				err = -ENOMEM;
				goto err_out;
			}
			memcpy(copied, dic, gotten);
			unlock_page(page);
			put_page(page);

			page = erofs_get_meta_page(sb, blkaddr + 1);
			if (IS_ERR(page)) {
				erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld",
					  vi->nid, PTR_ERR(page));
				kfree(copied);
				return page;
			}
			*ofs = vi->inode_isize - gotten;
			memcpy((u8 *)copied + gotten, page_address(page), *ofs);
			die = copied;
		}
		vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);

		inode->i_mode = le16_to_cpu(die->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(die->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le32_to_cpu(die->i_uid));
		i_gid_write(inode, le32_to_cpu(die->i_gid));
		set_nlink(inode, le32_to_cpu(die->i_nlink));

		/* extended inode has its own timestamp */
		inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
		inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);

		inode->i_size = le64_to_cpu(die->i_size);

		/* total blocks for compressed files */
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(die->i_u.compressed_blocks);

		kfree(copied);
		break;
	case EROFS_INODE_LAYOUT_COMPACT:
		vi->inode_isize = sizeof(struct erofs_inode_compact);
		*ofs += vi->inode_isize;
		vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);

		inode->i_mode = le16_to_cpu(dic->i_mode);
		switch (inode->i_mode & S_IFMT) {
		case S_IFREG:
		case S_IFDIR:
		case S_IFLNK:
			vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
			break;
		case S_IFCHR:
		case S_IFBLK:
			inode->i_rdev =
				new_decode_dev(le32_to_cpu(dic->i_u.rdev));
			break;
		case S_IFIFO:
		case S_IFSOCK:
			inode->i_rdev = 0;
			break;
		default:
			goto bogusimode;
		}
		i_uid_write(inode, le16_to_cpu(dic->i_uid));
		i_gid_write(inode, le16_to_cpu(dic->i_gid));
		set_nlink(inode, le16_to_cpu(dic->i_nlink));

		/* use build time for compact inodes */
		inode->i_ctime.tv_sec = sbi->build_time;
		inode->i_ctime.tv_nsec = sbi->build_time_nsec;

		inode->i_size = le32_to_cpu(dic->i_size);
		if (erofs_inode_is_data_compressed(vi->datalayout))
			nblks = le32_to_cpu(dic->i_u.compressed_blocks);
		break;
	default:
		erofs_err(inode->i_sb,
			  "unsupported on-disk inode version %u of nid %llu",
			  erofs_inode_version(ifmt), vi->nid);
		err = -EOPNOTSUPP;
		goto err_out;
	}

	inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
	inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
	inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;

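	/*
	 * i_blocks is counted in 512-byte sectors. Compressed inodes record
	 * their compressed block count on disk; for everything else, derive
	 * it from i_size rounded up to the filesystem block size.
	 */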
	if (!nblks)
		/* measure inode.i_blocks as generic filesystems */
		inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
	else
		inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
	return page;

bogusimode:
	erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
		  inode->i_mode, vi->nid);
	err = -EFSCORRUPTED;
err_out:
	DBG_BUGON(1);
	kfree(copied);
	unlock_page(page);
	put_page(page);
	return ERR_PTR(err);
}

static int erofs_fill_symlink(struct inode *inode, void *data,
			      unsigned int m_pofs)
{
	struct erofs_inode *vi = EROFS_I(inode);
	char *lnk;

	/* if it cannot be handled with the fast symlink scheme */
	if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
	    inode->i_size >= PAGE_SIZE) {
		inode->i_op = &erofs_symlink_iops;
		return 0;
	}

	lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
	if (!lnk)
		return -ENOMEM;

	m_pofs += vi->xattr_isize;
	/* inline symlink data shouldn't cross the page boundary either */
	if (m_pofs + inode->i_size > PAGE_SIZE) {
		kfree(lnk);
		erofs_err(inode->i_sb,
			  "inline data cross block boundary @ nid %llu",
			  vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}

	memcpy(lnk, data + m_pofs, inode->i_size);
	lnk[inode->i_size] = '\0';

	inode->i_link = lnk;
	inode->i_op = &erofs_fast_symlink_iops;
	return 0;
}

static int erofs_fill_inode(struct inode *inode, int isdir)
{
	struct erofs_inode *vi = EROFS_I(inode);
	struct page *page;
	unsigned int ofs;
	int err = 0;

	trace_erofs_fill_inode(inode, isdir);

	/* read inode base data from disk */
	page = erofs_read_inode(inode, &ofs);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* setup the new inode */
	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &erofs_generic_iops;
		inode->i_fop = &generic_ro_fops;
		break;
	case S_IFDIR:
		inode->i_op = &erofs_dir_iops;
		inode->i_fop = &erofs_dir_fops;
		break;
	case S_IFLNK:
		err = erofs_fill_symlink(inode, page_address(page), ofs);
		if (err)
			goto out_unlock;
		inode_nohighmem(inode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		inode->i_op = &erofs_generic_iops;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		goto out_unlock;
	default:
		err = -EFSCORRUPTED;
		goto out_unlock;
	}

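	/*
	 * Compressed inodes get their decompression-aware address space
	 * operations set up in z_erofs_fill_inode(); uncompressed inodes
	 * read file blocks directly through erofs_raw_access_aops.
	 */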
	if (erofs_inode_is_data_compressed(vi->datalayout)) {
		err = z_erofs_fill_inode(inode);
		goto out_unlock;
	}
	inode->i_mapping->a_ops = &erofs_raw_access_aops;

out_unlock:
	unlock_page(page);
	put_page(page);
	return err;
}

/*
 * erofs nid is 64 bits, but i_ino is 'unsigned long', so more work is
 * needed on 32-bit platforms to find the right inode.
 */
static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	return EROFS_I(inode)->nid == nid;
}

static int erofs_iget_set_actor(struct inode *inode, void *opaque)
{
	const erofs_nid_t nid = *(erofs_nid_t *)opaque;

	inode->i_ino = erofs_inode_hash(nid);
	return 0;
}

static inline struct inode *erofs_iget_locked(struct super_block *sb,
					      erofs_nid_t nid)
{
	const unsigned long hashval = erofs_inode_hash(nid);

	return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
		erofs_iget_set_actor, &nid);
}
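
/*
 * erofs_iget() returns either the cached in-memory inode for @nid or a new
 * one freshly filled via erofs_fill_inode(). A minimal usage sketch (the
 * nid/d_type variables below are assumed to come from a resolved dirent):
 *
 *	inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *
 * The isdir hint is currently only used for tracing in erofs_fill_inode().
 */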
struct inode *erofs_iget(struct super_block *sb,
			 erofs_nid_t nid,
			 bool isdir)
{
	struct inode *inode = erofs_iget_locked(sb, nid);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int err;
		struct erofs_inode *vi = EROFS_I(inode);

		vi->nid = nid;

		err = erofs_fill_inode(inode, isdir);
		if (!err)
			unlock_new_inode(inode);
		else {
			iget_failed(inode);
			inode = ERR_PTR(err);
		}
	}
	return inode;
}

int erofs_getattr(const struct path *path, struct kstat *stat,
		  u32 request_mask, unsigned int query_flags)
{
	struct inode *const inode = d_inode(path->dentry);

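	/*
	 * EROFS is a read-only filesystem, so every inode is reported as
	 * immutable; compressed inodes additionally advertise the
	 * STATX_ATTR_COMPRESSED attribute.
	 */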
	if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
		stat->attributes |= STATX_ATTR_COMPRESSED;

	stat->attributes |= STATX_ATTR_IMMUTABLE;
	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE);

	generic_fillattr(inode, stat);
	return 0;
}

const struct inode_operations erofs_generic_iops = {
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_symlink_iops = {
	.get_link = page_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};

const struct inode_operations erofs_fast_symlink_iops = {
	.get_link = simple_get_link,
	.getattr = erofs_getattr,
	.listxattr = erofs_listxattr,
	.get_acl = erofs_get_acl,
};