// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2019 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
# include "internal.h"
# include <asm/unaligned.h>
# include <trace/events/erofs.h>
2021-12-28 08:46:04 +03:00
static int z_erofs_do_map_blocks ( struct inode * inode ,
struct erofs_map_blocks * map ,
int flags ) ;
2019-06-24 10:22:52 +03:00
int z_erofs_fill_inode ( struct inode * inode )
{
2019-09-04 05:08:56 +03:00
struct erofs_inode * const vi = EROFS_I ( inode ) ;
2021-04-07 07:39:24 +03:00
struct erofs_sb_info * sbi = EROFS_SB ( inode - > i_sb ) ;
2019-06-24 10:22:52 +03:00
2021-04-07 07:39:24 +03:00
if ( ! erofs_sb_has_big_pcluster ( sbi ) & &
2021-12-28 08:46:04 +03:00
! erofs_sb_has_ztailpacking ( sbi ) & &
2021-04-07 07:39:24 +03:00
vi - > datalayout = = EROFS_INODE_FLAT_COMPRESSION_LEGACY ) {
2019-06-24 10:22:52 +03:00
vi - > z_advise = 0 ;
vi - > z_algorithmtype [ 0 ] = 0 ;
vi - > z_algorithmtype [ 1 ] = 0 ;
2019-07-31 18:57:48 +03:00
vi - > z_logical_clusterbits = LOG_BLOCK_SIZE ;
2019-09-04 05:08:56 +03:00
set_bit ( EROFS_I_Z_INITED_BIT , & vi - > flags ) ;
2019-06-24 10:22:52 +03:00
}
2019-11-08 06:37:33 +03:00
inode - > i_mapping - > a_ops = & z_erofs_aops ;
2019-06-24 10:22:52 +03:00
return 0 ;
}
2019-11-08 06:37:33 +03:00
static int z_erofs_fill_inode_lazy ( struct inode * inode )
2019-06-24 10:22:52 +03:00
{
2019-09-04 05:08:56 +03:00
struct erofs_inode * const vi = EROFS_I ( inode ) ;
2019-06-24 10:22:52 +03:00
struct super_block * const sb = inode - > i_sb ;
2021-10-17 19:57:21 +03:00
int err , headnr ;
2019-06-24 10:22:52 +03:00
erofs_off_t pos ;
2022-01-02 07:00:17 +03:00
struct erofs_buf buf = __EROFS_BUF_INITIALIZER ;
2019-06-24 10:22:52 +03:00
void * kaddr ;
struct z_erofs_map_header * h ;
2021-02-09 16:06:18 +03:00
if ( test_bit ( EROFS_I_Z_INITED_BIT , & vi - > flags ) ) {
/*
* paired with smp_mb ( ) at the end of the function to ensure
* fields will only be observed after the bit is set .
*/
smp_mb ( ) ;
2019-06-24 10:22:52 +03:00
return 0 ;
2021-02-09 16:06:18 +03:00
}
2019-06-24 10:22:52 +03:00
2019-09-04 05:08:56 +03:00
if ( wait_on_bit_lock ( & vi - > flags , EROFS_I_BL_Z_BIT , TASK_KILLABLE ) )
2019-06-24 10:22:52 +03:00
return - ERESTARTSYS ;
err = 0 ;
2019-09-04 05:08:56 +03:00
if ( test_bit ( EROFS_I_Z_INITED_BIT , & vi - > flags ) )
2019-06-24 10:22:52 +03:00
goto out_unlock ;
2021-04-07 07:39:24 +03:00
DBG_BUGON ( ! erofs_sb_has_big_pcluster ( EROFS_SB ( sb ) ) & &
2021-12-28 08:46:04 +03:00
! erofs_sb_has_ztailpacking ( EROFS_SB ( sb ) ) & &
2021-04-07 07:39:24 +03:00
vi - > datalayout = = EROFS_INODE_FLAT_COMPRESSION_LEGACY ) ;
2019-06-24 10:22:52 +03:00
pos = ALIGN ( iloc ( EROFS_SB ( sb ) , vi - > nid ) + vi - > inode_isize +
vi - > xattr_isize , 8 ) ;
2022-01-02 07:00:17 +03:00
kaddr = erofs_read_metabuf ( & buf , sb , erofs_blknr ( pos ) ,
EROFS_KMAP_ATOMIC ) ;
if ( IS_ERR ( kaddr ) ) {
err = PTR_ERR ( kaddr ) ;
2019-06-24 10:22:52 +03:00
goto out_unlock ;
}
h = kaddr + erofs_blkoff ( pos ) ;
vi - > z_advise = le16_to_cpu ( h - > h_advise ) ;
vi - > z_algorithmtype [ 0 ] = h - > h_algorithmtype & 15 ;
vi - > z_algorithmtype [ 1 ] = h - > h_algorithmtype > > 4 ;
2021-10-17 19:57:21 +03:00
headnr = 0 ;
if ( vi - > z_algorithmtype [ 0 ] > = Z_EROFS_COMPRESSION_MAX | |
vi - > z_algorithmtype [ + + headnr ] > = Z_EROFS_COMPRESSION_MAX ) {
erofs_err ( sb , " unknown HEAD%u format %u for nid %llu, please upgrade kernel " ,
headnr + 1 , vi - > z_algorithmtype [ headnr ] , vi - > nid ) ;
2019-08-14 13:37:05 +03:00
err = - EOPNOTSUPP ;
2019-06-24 10:22:52 +03:00
goto unmap_done ;
}
vi - > z_logical_clusterbits = LOG_BLOCK_SIZE + ( h - > h_clusterbits & 7 ) ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
if ( ! erofs_sb_has_big_pcluster ( EROFS_SB ( sb ) ) & &
vi - > z_advise & ( Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2 ) ) {
erofs_err ( sb , " per-inode big pcluster without sb feature for nid %llu " ,
vi - > nid ) ;
err = - EFSCORRUPTED ;
goto unmap_done ;
}
if ( vi - > datalayout = = EROFS_INODE_FLAT_COMPRESSION & &
! ( vi - > z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1 ) ^
! ( vi - > z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2 ) ) {
erofs_err ( sb , " big pcluster head1/2 of compact indexes should be consistent for nid %llu " ,
vi - > nid ) ;
err = - EFSCORRUPTED ;
goto unmap_done ;
}
2019-06-24 10:22:52 +03:00
unmap_done :
2022-01-02 07:00:17 +03:00
erofs_put_metabuf ( & buf ) ;
2021-12-28 08:46:04 +03:00
if ( err )
goto out_unlock ;
if ( vi - > z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER ) {
2022-01-02 07:00:17 +03:00
struct erofs_map_blocks map = {
. buf = __EROFS_BUF_INITIALIZER
} ;
2021-12-28 08:46:04 +03:00
vi - > z_idata_size = le16_to_cpu ( h - > h_idata_size ) ;
err = z_erofs_do_map_blocks ( inode , & map ,
EROFS_GET_BLOCKS_FINDTAIL ) ;
2022-01-02 07:00:17 +03:00
erofs_put_metabuf ( & map . buf ) ;
2021-12-28 08:46:04 +03:00
if ( ! map . m_plen | |
erofs_blkoff ( map . m_pa ) + map . m_plen > EROFS_BLKSIZ ) {
erofs_err ( sb , " invalid tail-packing pclustersize %llu " ,
map . m_plen ) ;
err = - EFSCORRUPTED ;
}
if ( err < 0 )
goto out_unlock ;
}
/* paired with smp_mb() at the beginning of the function */
smp_mb ( ) ;
set_bit ( EROFS_I_Z_INITED_BIT , & vi - > flags ) ;
2019-06-24 10:22:52 +03:00
out_unlock :
2019-09-04 05:08:56 +03:00
clear_and_wake_up_bit ( EROFS_I_BL_Z_BIT , & vi - > flags ) ;
2019-06-24 10:22:52 +03:00
return err ;
}
struct z_erofs_maprecorder {
struct inode * inode ;
struct erofs_map_blocks * map ;
void * kaddr ;
unsigned long lcn ;
/* compression extent information gathered */
2021-10-08 23:08:37 +03:00
u8 type , headtype ;
2019-06-24 10:22:52 +03:00
u16 clusterofs ;
u16 delta [ 2 ] ;
2021-04-07 07:39:24 +03:00
erofs_blk_t pblk , compressedlcs ;
2021-12-28 08:46:04 +03:00
erofs_off_t nextpackoff ;
2019-06-24 10:22:52 +03:00
} ;
/*
 * (Re)map the metadata block @eblk into m->kaddr via the recorder's
 * metabuf.  Returns 0 or a negative errno from erofs_read_metabuf().
 */
static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
				  erofs_blk_t eblk)
{
	struct super_block *const sb = m->inode->i_sb;

	m->kaddr = erofs_read_metabuf(&m->map->buf, sb, eblk,
				      EROFS_KMAP_ATOMIC);
	if (IS_ERR(m->kaddr))
		return PTR_ERR(m->kaddr);
	return 0;
}
2019-11-08 06:37:33 +03:00
static int legacy_load_cluster_from_disk ( struct z_erofs_maprecorder * m ,
unsigned long lcn )
2019-06-24 10:22:52 +03:00
{
struct inode * const inode = m - > inode ;
2019-09-04 05:08:56 +03:00
struct erofs_inode * const vi = EROFS_I ( inode ) ;
2019-06-24 10:22:52 +03:00
const erofs_off_t ibase = iloc ( EROFS_I_SB ( inode ) , vi - > nid ) ;
const erofs_off_t pos =
Z_EROFS_VLE_LEGACY_INDEX_ALIGN ( ibase + vi - > inode_isize +
vi - > xattr_isize ) +
lcn * sizeof ( struct z_erofs_vle_decompressed_index ) ;
struct z_erofs_vle_decompressed_index * di ;
unsigned int advise , type ;
int err ;
err = z_erofs_reload_indexes ( m , erofs_blknr ( pos ) ) ;
if ( err )
return err ;
2021-12-28 08:46:04 +03:00
m - > nextpackoff = pos + sizeof ( struct z_erofs_vle_decompressed_index ) ;
2019-06-24 10:22:52 +03:00
m - > lcn = lcn ;
di = m - > kaddr + erofs_blkoff ( pos ) ;
advise = le16_to_cpu ( di - > di_advise ) ;
type = ( advise > > Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT ) &
( ( 1 < < Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS ) - 1 ) ;
switch ( type ) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD :
m - > clusterofs = 1 < < vi - > z_logical_clusterbits ;
m - > delta [ 0 ] = le16_to_cpu ( di - > di_u . delta [ 0 ] ) ;
2021-04-07 07:39:24 +03:00
if ( m - > delta [ 0 ] & Z_EROFS_VLE_DI_D0_CBLKCNT ) {
2021-10-17 19:57:21 +03:00
if ( ! ( vi - > z_advise & ( Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
Z_EROFS_ADVISE_BIG_PCLUSTER_2 ) ) ) {
2021-04-07 07:39:24 +03:00
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
m - > compressedlcs = m - > delta [ 0 ] &
~ Z_EROFS_VLE_DI_D0_CBLKCNT ;
m - > delta [ 0 ] = 1 ;
}
2019-06-24 10:22:52 +03:00
m - > delta [ 1 ] = le16_to_cpu ( di - > di_u . delta [ 1 ] ) ;
break ;
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
2021-10-17 19:57:21 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 :
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 :
2019-06-24 10:22:52 +03:00
m - > clusterofs = le16_to_cpu ( di - > di_clusterofs ) ;
m - > pblk = le32_to_cpu ( di - > di_u . blkaddr ) ;
break ;
default :
DBG_BUGON ( 1 ) ;
2019-08-14 13:37:04 +03:00
return - EOPNOTSUPP ;
2019-06-24 10:22:52 +03:00
}
m - > type = type ;
return 0 ;
}
/*
 * Extract one packed compact-index field starting at bit @pos of @in:
 * the low @lobits bits (masked by @lomask) are returned, and the 2-bit
 * cluster type above them is stored through @type.
 */
static unsigned int decode_compactedbits(unsigned int lobits,
					 unsigned int lomask,
					 u8 *in, unsigned int pos, u8 *type)
{
	const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
	const unsigned int lo = v & lomask;

	*type = (v >> lobits) & 3;
	return lo;
}
2021-08-18 18:22:31 +03:00
/*
 * Compute the lookahead distance (delta[1]) for the i-th lcluster of a
 * compact pack: count the following NONHEAD entries until the next HEAD
 * (or the end of the pack, where lo encodes delta[0] semantics instead).
 */
static int get_compacted_la_distance(unsigned int lclusterbits,
				     unsigned int encodebits,
				     unsigned int vcnt, u8 *in, int i)
{
	const unsigned int lomask = (1 << lclusterbits) - 1;
	unsigned int lo, d1 = 0;
	u8 type;

	DBG_BUGON(i >= vcnt);

	do {
		lo = decode_compactedbits(lclusterbits, lomask,
					  in, encodebits * i, &type);

		if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
			return d1;
		++d1;
	} while (++i < vcnt);

	/* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */
	if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT))
		d1 += lo - 1;
	return d1;
}
2019-06-24 10:22:52 +03:00
static int unpack_compacted_index ( struct z_erofs_maprecorder * m ,
unsigned int amortizedshift ,
2021-12-28 08:46:04 +03:00
erofs_off_t pos , bool lookahead )
2019-06-24 10:22:52 +03:00
{
2019-09-04 05:08:56 +03:00
struct erofs_inode * const vi = EROFS_I ( m - > inode ) ;
2019-06-24 10:22:52 +03:00
const unsigned int lclusterbits = vi - > z_logical_clusterbits ;
const unsigned int lomask = ( 1 < < lclusterbits ) - 1 ;
2021-12-28 08:46:04 +03:00
unsigned int vcnt , base , lo , encodebits , nblk , eofs ;
2019-06-24 10:22:52 +03:00
int i ;
u8 * in , type ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
bool big_pcluster ;
2019-06-24 10:22:52 +03:00
if ( 1 < < amortizedshift = = 4 )
vcnt = 2 ;
else if ( 1 < < amortizedshift = = 2 & & lclusterbits = = 12 )
vcnt = 16 ;
else
2019-08-14 13:37:05 +03:00
return - EOPNOTSUPP ;
2019-06-24 10:22:52 +03:00
2021-12-28 08:46:04 +03:00
/* it doesn't equal to round_up(..) */
m - > nextpackoff = round_down ( pos , vcnt < < amortizedshift ) +
( vcnt < < amortizedshift ) ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
big_pcluster = vi - > z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1 ;
2019-06-24 10:22:52 +03:00
encodebits = ( ( vcnt < < amortizedshift ) - sizeof ( __le32 ) ) * 8 / vcnt ;
2021-12-28 08:46:04 +03:00
eofs = erofs_blkoff ( pos ) ;
2019-06-24 10:22:52 +03:00
base = round_down ( eofs , vcnt < < amortizedshift ) ;
in = m - > kaddr + base ;
i = ( eofs - base ) > > amortizedshift ;
lo = decode_compactedbits ( lclusterbits , lomask ,
in , encodebits * i , & type ) ;
m - > type = type ;
if ( type = = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD ) {
m - > clusterofs = 1 < < lclusterbits ;
2021-08-18 18:22:31 +03:00
/* figure out lookahead_distance: delta[1] if needed */
if ( lookahead )
m - > delta [ 1 ] = get_compacted_la_distance ( lclusterbits ,
encodebits , vcnt , in , i ) ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
if ( lo & Z_EROFS_VLE_DI_D0_CBLKCNT ) {
if ( ! big_pcluster ) {
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
m - > compressedlcs = lo & ~ Z_EROFS_VLE_DI_D0_CBLKCNT ;
m - > delta [ 0 ] = 1 ;
return 0 ;
} else if ( i + 1 ! = ( int ) vcnt ) {
2019-06-24 10:22:52 +03:00
m - > delta [ 0 ] = lo ;
return 0 ;
}
/*
* since the last lcluster in the pack is special ,
* of which lo saves delta [ 1 ] rather than delta [ 0 ] .
* Hence , get delta [ 0 ] by the previous lcluster indirectly .
*/
lo = decode_compactedbits ( lclusterbits , lomask ,
in , encodebits * ( i - 1 ) , & type ) ;
if ( type ! = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD )
lo = 0 ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
else if ( lo & Z_EROFS_VLE_DI_D0_CBLKCNT )
lo = 1 ;
2019-06-24 10:22:52 +03:00
m - > delta [ 0 ] = lo + 1 ;
return 0 ;
}
m - > clusterofs = lo ;
m - > delta [ 0 ] = 0 ;
/* figout out blkaddr (pblk) for HEAD lclusters */
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
if ( ! big_pcluster ) {
nblk = 1 ;
while ( i > 0 ) {
- - i ;
lo = decode_compactedbits ( lclusterbits , lomask ,
in , encodebits * i , & type ) ;
if ( type = = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD )
i - = lo ;
if ( i > = 0 )
+ + nblk ;
}
} else {
nblk = 0 ;
while ( i > 0 ) {
- - i ;
lo = decode_compactedbits ( lclusterbits , lomask ,
in , encodebits * i , & type ) ;
if ( type = = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD ) {
if ( lo & Z_EROFS_VLE_DI_D0_CBLKCNT ) {
- - i ;
nblk + = lo & ~ Z_EROFS_VLE_DI_D0_CBLKCNT ;
continue ;
}
/* bigpcluster shouldn't have plain d0 == 1 */
if ( lo < = 1 ) {
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
i - = lo - 2 ;
continue ;
}
2019-06-24 10:22:52 +03:00
+ + nblk ;
erofs: support parsing big pcluster compact indexes
Different from non-compact indexes, several lclusters are packed
as the compact form at once and an unique base blkaddr is stored for
each pack, so each lcluster index would take less space on avarage
(e.g. 2 bytes for COMPACT_2B.) btw, that is also why BIG_PCLUSTER
switch should be consistent for compact head0/1.
Prior to big pcluster, the size of all pclusters was 1 lcluster.
Therefore, when a new HEAD lcluster was scanned, blkaddr would be
bumped by 1 lcluster. However, that way doesn't work anymore for
big pcluster since we actually don't know the compressed size of
pclusters in advance (before reading CBLKCNT lcluster).
So, instead, let blkaddr of each pack be the first pcluster blkaddr
with a valid CBLKCNT, in detail,
1) if CBLKCNT starts at the pack, this first valid pcluster is
itself, e.g.
_____________________________________________________________
|_CBLKCNT0_|_NONHEAD_| .. |_HEAD_|_CBLKCNT1_| ... |_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0 ^ += CBLKCNT1
2) if CBLKCNT doesn't start at the pack, the first valid pcluster
is the next pcluster, e.g.
_________________________________________________________
| NONHEAD_| .. |_HEAD_|_CBLKCNT0_| ... |_HEAD_|_HEAD_| ...
^ = blkaddr base ^ += CBLKCNT0
^ += 1
When a CBLKCNT is found, blkaddr will be increased by CBLKCNT
lclusters, or a new HEAD is found immediately, bump blkaddr by 1
instead (see the picture above.)
Also noted if CBLKCNT is the end of the pack, instead of storing
delta1 (distance of the next HEAD lcluster) as normal NONHEADs,
it still uses the compressed block count (delta0) since delta1
can be calculated indirectly but the block count can't.
Adjust decoding logic to fit big pcluster compact indexes as well.
Link: https://lore.kernel.org/r/20210407043927.10623-9-xiang@kernel.org
Acked-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <hsiangkao@redhat.com>
2021-04-07 07:39:25 +03:00
}
2019-06-24 10:22:52 +03:00
}
in + = ( vcnt < < amortizedshift ) - sizeof ( __le32 ) ;
m - > pblk = le32_to_cpu ( * ( __le32 * ) in ) + nblk ;
return 0 ;
}
/*
 * Locate the compact index entry for logical cluster @lcn, accounting
 * for the 32-byte-alignment run of 4B entries that may precede the 2B
 * region, map the containing metadata block and decode the entry.
 *
 * Returns 0 on success, -EOPNOTSUPP if lclusterbits != 12, -EINVAL if
 * @lcn is out of range, or a negative errno from the helpers.
 */
static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					    unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
					vi->inode_isize + vi->xattr_isize, 8) +
		sizeof(struct z_erofs_map_header);
	const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;
	int err;

	if (lclusterbits != 12)
		return -EOPNOTSUPP;

	if (lcn >= totalidx)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	err = z_erofs_reload_indexes(m, erofs_blknr(pos));
	if (err)
		return err;
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}
2019-11-08 06:37:33 +03:00
/*
 * Dispatch lcluster decoding to the legacy or compact index format
 * based on the inode's data layout.  Returns -EINVAL for layouts that
 * carry no compression indexes.
 */
static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
					  unsigned int lcn, bool lookahead)
{
	const unsigned int datamode = EROFS_I(m->inode)->datalayout;

	if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
		return legacy_load_cluster_from_disk(m, lcn);

	if (datamode == EROFS_INODE_FLAT_COMPRESSION)
		return compacted_load_cluster_from_disk(m, lcn, lookahead);

	return -EINVAL;
}
2019-11-08 06:37:33 +03:00
static int z_erofs_extent_lookback ( struct z_erofs_maprecorder * m ,
unsigned int lookback_distance )
2019-06-24 10:22:52 +03:00
{
2019-09-04 05:08:56 +03:00
struct erofs_inode * const vi = EROFS_I ( m - > inode ) ;
2019-06-24 10:22:52 +03:00
struct erofs_map_blocks * const map = m - > map ;
const unsigned int lclusterbits = vi - > z_logical_clusterbits ;
unsigned long lcn = m - > lcn ;
int err ;
if ( lcn < lookback_distance ) {
2019-09-04 05:09:09 +03:00
erofs_err ( m - > inode - > i_sb ,
" bogus lookback distance @ nid %llu " , vi - > nid ) ;
2019-06-24 10:22:52 +03:00
DBG_BUGON ( 1 ) ;
2019-08-14 13:37:03 +03:00
return - EFSCORRUPTED ;
2019-06-24 10:22:52 +03:00
}
/* load extent head logical cluster if needed */
lcn - = lookback_distance ;
2021-08-18 18:22:31 +03:00
err = z_erofs_load_cluster_from_disk ( m , lcn , false ) ;
2019-06-24 10:22:52 +03:00
if ( err )
return err ;
switch ( m - > type ) {
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD :
2019-08-29 19:38:27 +03:00
if ( ! m - > delta [ 0 ] ) {
2019-09-04 05:09:09 +03:00
erofs_err ( m - > inode - > i_sb ,
" invalid lookback distance 0 @ nid %llu " ,
vi - > nid ) ;
2019-08-19 13:34:26 +03:00
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
2019-11-08 06:37:33 +03:00
return z_erofs_extent_lookback ( m , m - > delta [ 0 ] ) ;
2019-06-24 10:22:52 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
2021-10-17 19:57:21 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 :
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 :
2021-10-08 23:08:37 +03:00
m - > headtype = m - > type ;
2019-06-24 10:22:52 +03:00
map - > m_la = ( lcn < < lclusterbits ) | m - > clusterofs ;
break ;
default :
2019-09-04 05:09:09 +03:00
erofs_err ( m - > inode - > i_sb ,
" unknown type %u @ lcn %lu of nid %llu " ,
m - > type , lcn , vi - > nid ) ;
2019-06-24 10:22:52 +03:00
DBG_BUGON ( 1 ) ;
2019-08-14 13:37:04 +03:00
return - EOPNOTSUPP ;
2019-06-24 10:22:52 +03:00
}
return 0 ;
}
2021-04-07 07:39:24 +03:00
static int z_erofs_get_extent_compressedlen ( struct z_erofs_maprecorder * m ,
unsigned int initial_lcn )
{
struct erofs_inode * const vi = EROFS_I ( m - > inode ) ;
struct erofs_map_blocks * const map = m - > map ;
const unsigned int lclusterbits = vi - > z_logical_clusterbits ;
unsigned long lcn ;
int err ;
DBG_BUGON ( m - > type ! = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN & &
2021-10-17 19:57:21 +03:00
m - > type ! = Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 & &
m - > type ! = Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 ) ;
DBG_BUGON ( m - > type ! = m - > headtype ) ;
2021-10-08 23:08:37 +03:00
if ( m - > headtype = = Z_EROFS_VLE_CLUSTER_TYPE_PLAIN | |
2021-10-17 19:57:21 +03:00
( ( m - > headtype = = Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ) & &
! ( vi - > z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1 ) ) | |
( ( m - > headtype = = Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 ) & &
! ( vi - > z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2 ) ) ) {
2022-03-10 20:34:48 +03:00
map - > m_plen = 1ULL < < lclusterbits ;
2021-04-07 07:39:24 +03:00
return 0 ;
}
lcn = m - > lcn + 1 ;
if ( m - > compressedlcs )
goto out ;
2021-08-18 18:22:31 +03:00
err = z_erofs_load_cluster_from_disk ( m , lcn , false ) ;
2021-04-07 07:39:24 +03:00
if ( err )
return err ;
2021-05-10 09:47:15 +03:00
/*
* If the 1 st NONHEAD lcluster has already been handled initially w / o
* valid compressedlcs , which means at least it mustn ' t be CBLKCNT , or
* an internal implemenatation error is detected .
*
* The following code can also handle it properly anyway , but let ' s
* BUG_ON in the debugging mode only for developers to notice that .
*/
DBG_BUGON ( lcn = = initial_lcn & &
m - > type = = Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD ) ;
2021-04-07 07:39:24 +03:00
switch ( m - > type ) {
2021-05-10 09:47:15 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN :
2021-10-17 19:57:21 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 :
case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 :
2021-05-10 09:47:15 +03:00
/*
* if the 1 st NONHEAD lcluster is actually PLAIN or HEAD type
* rather than CBLKCNT , it ' s a 1 lcluster - sized pcluster .
*/
m - > compressedlcs = 1 ;
break ;
2021-04-07 07:39:24 +03:00
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD :
if ( m - > delta [ 0 ] ! = 1 )
goto err_bonus_cblkcnt ;
if ( m - > compressedlcs )
break ;
fallthrough ;
default :
erofs_err ( m - > inode - > i_sb ,
" cannot found CBLKCNT @ lcn %lu of nid %llu " ,
lcn , vi - > nid ) ;
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
out :
2022-03-10 20:34:48 +03:00
map - > m_plen = ( u64 ) m - > compressedlcs < < lclusterbits ;
2021-04-07 07:39:24 +03:00
return 0 ;
err_bonus_cblkcnt :
erofs_err ( m - > inode - > i_sb ,
" bogus CBLKCNT @ lcn %lu of nid %llu " ,
lcn , vi - > nid ) ;
DBG_BUGON ( 1 ) ;
return - EFSCORRUPTED ;
}
2021-08-18 18:22:31 +03:00
/*
 * Extend map->m_llen to the full decompressed extent length by walking
 * lclusters forward from m->lcn until the next HEAD lcluster (or EOF) is
 * reached.  Used for FIEMAP-style reporting and readmore (see the callers'
 * EROFS_GET_BLOCKS_FIEMAP/READMORE handling in z_erofs_do_map_blocks()).
 *
 * Returns 0 on success or a negative errno.
 */
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	do {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_cluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
			/* a mid-extent NONHEAD must carry a forward delta[1] */
			DBG_BUGON(!m->delta[1] &&
				  m->clusterofs != 1 << lclusterbits);
		} else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) {
			/* go on until the next HEAD lcluster */
			if (lcn != headlcn)
				break;
			/* the starting HEAD itself: step one lcluster ahead */
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	} while (m->delta[1]);

	/* loop stopped at the next HEAD: length runs up to its clusterofs */
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}
2021-12-28 08:46:04 +03:00
/*
 * Core compressed-extent mapper: translate the logical offset in
 * map->m_la (or the offset of the last byte when EROFS_GET_BLOCKS_FINDTAIL
 * is set) into the containing extent, filling in map->m_la/m_llen (logical
 * side), map->m_pa/m_plen (physical side), map->m_flags and
 * map->m_algorithmformat.
 *
 * Returns 0 on success or a negative errno; the metabuf used during the
 * walk is unmapped before returning.
 */
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map,
				 int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	/* whether the tail pcluster is stored inline (ztailpacking) */
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	struct z_erofs_maprecorder m = {
		.inode = inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	/* FINDTAIL maps the extent covering the very last byte of the file */
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	/* record where inline (tail-packed) data starts for later lookups */
	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1:
	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			/* the offset lies in this lcluster's own extent */
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb,
				  "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		/* the offset belongs to the previous extent; look back */
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb,
			  "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}

	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL)
		vi->z_tailextent_headlcn = m.lcn;
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		/* tail extent is inline in the metadata area */
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else {
		map->m_pa = blknr_to_addr(m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto out;
	}

	if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN)
		map->m_algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
	else if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2)
		map->m_algorithmformat = vi->z_algorithmtype[1];
	else
		map->m_algorithmformat = vi->z_algorithmtype[0];

	/*
	 * Extend to the full decompressed length for FIEMAP reporting, or
	 * for readmore on LZMA extents spanning at least one block.
	 */
	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA &&
	     map->m_llen >= EROFS_BLKSIZ)) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}
unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
out:
	erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
		  __func__, map->m_la, map->m_pa,
		  map->m_llen, map->m_plen, map->m_flags);

	return err;
}
int z_erofs_map_blocks_iter ( struct inode * inode ,
struct erofs_map_blocks * map ,
int flags )
{
int err = 0 ;
trace_z_erofs_map_blocks_iter_enter ( inode , map , flags ) ;
/* when trying to read beyond EOF, leave it unmapped */
if ( map - > m_la > = inode - > i_size ) {
map - > m_llen = map - > m_la + 1 - inode - > i_size ;
map - > m_la = inode - > i_size ;
map - > m_flags = 0 ;
goto out ;
}
err = z_erofs_fill_inode_lazy ( inode ) ;
if ( err )
goto out ;
err = z_erofs_do_map_blocks ( inode , map , flags ) ;
out :
2019-06-24 10:22:52 +03:00
trace_z_erofs_map_blocks_iter_exit ( inode , map , flags , err ) ;
/* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
DBG_BUGON ( err < 0 & & err ! = - ENOMEM ) ;
return err ;
}
2021-08-13 08:29:31 +03:00
/*
 * iomap_begin hook used only for reporting (FIEMAP): translate the request
 * into an erofs mapping and describe the result as a mapped range or a hole.
 */
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	struct erofs_map_blocks map = { .m_la = offset };
	int err;

	err = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (err < 0)
		return err;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * No strict rule how to describe extents for post EOF, yet
		 * we need do like below. Otherwise, iomap itself will get
		 * into an endless loop on post EOF.
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + map.m_la - offset;
	} else {
		iomap->type = IOMAP_MAPPED;
		iomap->addr = map.m_pa;
	}
	return 0;
}
/* iomap operations for report-only use (e.g. FIEMAP) on compressed inodes */
const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};