Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:

 - Convert ext4's bmap and iopoll implementations to use iomap.

 - Clean up extent tree handling.

 - Other cleanups and miscellaneous bug fixes.

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (31 commits)
  ext4: save all error info in save_error_info() and drop ext4_set_errno()
  ext4: fix incorrect group count in ext4_fill_super error message
  ext4: fix incorrect inodes per group in error message
  ext4: don't set dioread_nolock by default for blocksize < pagesize
  ext4: disable dioread_nolock whenever delayed allocation is disabled
  ext4: do not commit super on read-only bdev
  ext4: avoid ENOSPC when avoiding to reuse recently deleted inodes
  ext4: unregister sysfs path before destroying jbd2 journal
  ext4: check for non-zero journal inum in ext4_calculate_overhead
  ext4: remove map_from_cluster from ext4_ext_map_blocks
  ext4: clean up ext4_ext_insert_extent() call in ext4_ext_map_blocks()
  ext4: mark block bitmap corrupted when found instead of BUGON
  ext4: use flexible-array member for xattr structs
  ext4: use flexible-array member in struct fname
  Documentation: correct the description of FIEMAP_EXTENT_LAST
  ext4: move ext4_fiemap to use iomap framework
  ext4: make ext4_ind_map_blocks work with fiemap
  ext4: move ext4 bmap to use iomap infrastructure
  ext4: optimize ext4_ext_precache for 0 depth
  ext4: add IOMAP_F_MERGED for non-extent based mapping
  ...
Linus Torvalds 2020-04-05 10:54:03 -07:00
commit 9c94b39560
22 changed files with 392 additions and 675 deletions

View File

@@ -115,8 +115,10 @@ data. Note that the opposite is not true - it would be valid for
 FIEMAP_EXTENT_NOT_ALIGNED to appear alone.

 * FIEMAP_EXTENT_LAST
-This is the last extent in the file. A mapping attempt past this
-extent will return nothing.
+This is generally the last extent in the file. A mapping attempt past
+this extent may return nothing. Some implementations set this flag to
+indicate this extent is the last one in the range queried by the user
+(via fiemap->fm_length).

 * FIEMAP_EXTENT_UNKNOWN
 The location of this extent is currently unknown. This may indicate
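
To make the revised FIEMAP_EXTENT_LAST semantics above concrete, here is a minimal userspace sketch. It is not part of this series; the helper name and the batch size of 32 extents are illustrative. It queries a file's extents with FS_IOC_FIEMAP and treats FIEMAP_EXTENT_LAST as "last extent within the queried range" rather than necessarily end-of-file.

/*
 * Hedged sketch: walk the extents of [start, start + len) and print them;
 * error handling is kept minimal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

static int dump_extents(int fd, __u64 start, __u64 len)
{
	unsigned int i, count = 32;		/* arbitrary batch size */
	struct fiemap *fm;

	fm = calloc(1, sizeof(*fm) + count * sizeof(struct fiemap_extent));
	if (!fm)
		return -1;
	fm->fm_start = start;
	fm->fm_length = len;			/* FIEMAP_MAX_OFFSET means "to EOF" */
	fm->fm_flags = FIEMAP_FLAG_SYNC;
	fm->fm_extent_count = count;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		free(fm);
		return -1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical %llu physical %llu length %llu flags 0x%x\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length, fe->fe_flags);
		/*
		 * LAST may only mean "last within fm_length", so a caller
		 * walking a whole file restarts past this extent instead of
		 * assuming it has reached end-of-file.
		 */
		if (fe->fe_flags & FIEMAP_EXTENT_LAST)
			break;
	}
	free(fm);
	return 0;
}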

View File

@@ -516,10 +516,9 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
 	wait_on_buffer(bh);
 	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_BBITMAP_EIO);
 	if (!buffer_uptodate(bh)) {
-		ext4_set_errno(sb, EIO);
-		ext4_error(sb, "Cannot read block bitmap - "
-			   "block_group = %u, block_bitmap = %llu",
-			   block_group, (unsigned long long) bh->b_blocknr);
+		ext4_error_err(sb, EIO, "Cannot read block bitmap - "
+			       "block_group = %u, block_bitmap = %llu",
+			       block_group, (unsigned long long) bh->b_blocknr);
 		ext4_mark_group_bitmap_corrupted(sb, block_group,
 					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
 		return -EIO;

View File

@ -166,10 +166,8 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
(start_blk + count < start_blk) || (start_blk + count < start_blk) ||
(start_blk + count > ext4_blocks_count(sbi->s_es))) { (start_blk + count > ext4_blocks_count(sbi->s_es)))
sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
return 0; return 0;
}
if (system_blks == NULL) if (system_blks == NULL)
return 1; return 1;
@ -181,10 +179,8 @@ static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi,
n = n->rb_left; n = n->rb_left;
else if (start_blk >= (entry->start_blk + entry->count)) else if (start_blk >= (entry->start_blk + entry->count))
n = n->rb_right; n = n->rb_right;
else { else
sbi->s_es->s_last_error_block = cpu_to_le64(start_blk);
return 0; return 0;
}
} }
return 1; return 1;
} }
@ -220,10 +216,12 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
} else { } else {
if (!ext4_data_block_valid_rcu(sbi, system_blks, if (!ext4_data_block_valid_rcu(sbi, system_blks,
map.m_pblk, n)) { map.m_pblk, n)) {
ext4_error(sb, "blocks %llu-%llu from inode %u "
"overlap system zone", map.m_pblk,
map.m_pblk + map.m_len - 1, ino);
err = -EFSCORRUPTED; err = -EFSCORRUPTED;
__ext4_error(sb, __func__, __LINE__, -err,
map.m_pblk, "blocks %llu-%llu "
"from inode %u overlap system zone",
map.m_pblk,
map.m_pblk + map.m_len - 1, ino);
break; break;
} }
err = add_system_zone(system_blks, map.m_pblk, n); err = add_system_zone(system_blks, map.m_pblk, n);
@ -365,7 +363,6 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk,
int ext4_check_blockref(const char *function, unsigned int line, int ext4_check_blockref(const char *function, unsigned int line,
struct inode *inode, __le32 *p, unsigned int max) struct inode *inode, __le32 *p, unsigned int max)
{ {
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
__le32 *bref = p; __le32 *bref = p;
unsigned int blk; unsigned int blk;
@ -379,7 +376,6 @@ int ext4_check_blockref(const char *function, unsigned int line,
if (blk && if (blk &&
unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
blk, 1))) { blk, 1))) {
es->s_last_error_block = cpu_to_le64(blk);
ext4_error_inode(inode, function, line, blk, ext4_error_inode(inode, function, line, blk,
"invalid block"); "invalid block");
return -EFSCORRUPTED; return -EFSCORRUPTED;

View File

@@ -392,7 +392,7 @@ struct fname {
 	__u32		inode;
 	__u8		name_len;
 	__u8		file_type;
-	char		name[0];
+	char		name[];
 };

 /*
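
The name[0]-to-name[] conversion above (and the matching xattr change in this series) follows the kernel-wide move to C99 flexible array members. As a hedged illustration only, not code from this series, such a struct is typically allocated with struct_size(), which does overflow-checked sizing of the trailing member; alloc_fname() below is a hypothetical helper.

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical: allocate a struct fname with room for a len-byte name. */
static struct fname *alloc_fname(const char *name, u8 len)
{
	struct fname *f = kzalloc(struct_size(f, name, len), GFP_KERNEL);

	if (!f)
		return NULL;
	f->name_len = len;
	memcpy(f->name, name, len);	/* name[] is the trailing flexible member */
	return f;
}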

View File

@ -414,7 +414,7 @@ struct flex_groups {
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */ #define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
#define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */ #define EXT4_VERITY_FL 0x00100000 /* Verity protected inode */
#define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define EXT4_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define EXT4_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */ /* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */ #define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ #define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */ #define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */
@ -487,7 +487,7 @@ enum {
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */ EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
EXT4_INODE_VERITY = 20, /* Verity protected inode */ EXT4_INODE_VERITY = 20, /* Verity protected inode */
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */ EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */ /* 22 was formerly EXT4_INODE_EOFBLOCKS */
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */ EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */ EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */ EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
@ -533,7 +533,6 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(EXTENTS); CHECK_FLAG_VALUE(EXTENTS);
CHECK_FLAG_VALUE(VERITY); CHECK_FLAG_VALUE(VERITY);
CHECK_FLAG_VALUE(EA_INODE); CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(EOFBLOCKS);
CHECK_FLAG_VALUE(INLINE_DATA); CHECK_FLAG_VALUE(INLINE_DATA);
CHECK_FLAG_VALUE(PROJINHERIT); CHECK_FLAG_VALUE(PROJINHERIT);
CHECK_FLAG_VALUE(RESERVED); CHECK_FLAG_VALUE(RESERVED);
@ -2771,21 +2770,20 @@ extern const char *ext4_decode_error(struct super_block *sb, int errno,
extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb, extern void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
ext4_group_t block_group, ext4_group_t block_group,
unsigned int flags); unsigned int flags);
extern void ext4_set_errno(struct super_block *sb, int err);
extern __printf(4, 5) extern __printf(6, 7)
void __ext4_error(struct super_block *, const char *, unsigned int, void __ext4_error(struct super_block *, const char *, unsigned int, int, __u64,
const char *, ...); const char *, ...);
extern __printf(5, 6) extern __printf(6, 7)
void __ext4_error_inode(struct inode *, const char *, unsigned int, ext4_fsblk_t, void __ext4_error_inode(struct inode *, const char *, unsigned int,
const char *, ...); ext4_fsblk_t, int, const char *, ...);
extern __printf(5, 6) extern __printf(5, 6)
void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t, void __ext4_error_file(struct file *, const char *, unsigned int, ext4_fsblk_t,
const char *, ...); const char *, ...);
extern void __ext4_std_error(struct super_block *, const char *, extern void __ext4_std_error(struct super_block *, const char *,
unsigned int, int); unsigned int, int);
extern __printf(4, 5) extern __printf(5, 6)
void __ext4_abort(struct super_block *, const char *, unsigned int, void __ext4_abort(struct super_block *, const char *, unsigned int, int,
const char *, ...); const char *, ...);
extern __printf(4, 5) extern __printf(4, 5)
void __ext4_warning(struct super_block *, const char *, unsigned int, void __ext4_warning(struct super_block *, const char *, unsigned int,
@ -2806,8 +2804,12 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#define EXT4_ERROR_INODE(inode, fmt, a...) \ #define EXT4_ERROR_INODE(inode, fmt, a...) \
ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a) ext4_error_inode((inode), __func__, __LINE__, 0, (fmt), ## a)
#define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...) \ #define EXT4_ERROR_INODE_ERR(inode, err, fmt, a...) \
ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a) __ext4_error_inode((inode), __func__, __LINE__, 0, (err), (fmt), ## a)
#define ext4_error_inode_block(inode, block, err, fmt, a...) \
__ext4_error_inode((inode), __func__, __LINE__, (block), (err), \
(fmt), ## a)
#define EXT4_ERROR_FILE(file, block, fmt, a...) \ #define EXT4_ERROR_FILE(file, block, fmt, a...) \
ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a) ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
@ -2815,13 +2817,18 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#ifdef CONFIG_PRINTK #ifdef CONFIG_PRINTK
#define ext4_error_inode(inode, func, line, block, fmt, ...) \ #define ext4_error_inode(inode, func, line, block, fmt, ...) \
__ext4_error_inode(inode, func, line, block, fmt, ##__VA_ARGS__) __ext4_error_inode(inode, func, line, block, 0, fmt, ##__VA_ARGS__)
#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
__ext4_error_inode((inode), (func), (line), (block), \
(err), (fmt), ##__VA_ARGS__)
#define ext4_error_file(file, func, line, block, fmt, ...) \ #define ext4_error_file(file, func, line, block, fmt, ...) \
__ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__) __ext4_error_file(file, func, line, block, fmt, ##__VA_ARGS__)
#define ext4_error(sb, fmt, ...) \ #define ext4_error(sb, fmt, ...) \
__ext4_error(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) __ext4_error((sb), __func__, __LINE__, 0, 0, (fmt), ##__VA_ARGS__)
#define ext4_abort(sb, fmt, ...) \ #define ext4_error_err(sb, err, fmt, ...) \
__ext4_abort(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) __ext4_error((sb), __func__, __LINE__, (err), 0, (fmt), ##__VA_ARGS__)
#define ext4_abort(sb, err, fmt, ...) \
__ext4_abort((sb), __func__, __LINE__, (err), (fmt), ##__VA_ARGS__)
#define ext4_warning(sb, fmt, ...) \ #define ext4_warning(sb, fmt, ...) \
__ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__) __ext4_warning(sb, __func__, __LINE__, fmt, ##__VA_ARGS__)
#define ext4_warning_inode(inode, fmt, ...) \ #define ext4_warning_inode(inode, fmt, ...) \
@ -2839,7 +2846,12 @@ void __ext4_grp_locked_error(const char *, unsigned int,
#define ext4_error_inode(inode, func, line, block, fmt, ...) \ #define ext4_error_inode(inode, func, line, block, fmt, ...) \
do { \ do { \
no_printk(fmt, ##__VA_ARGS__); \ no_printk(fmt, ##__VA_ARGS__); \
__ext4_error_inode(inode, "", 0, block, " "); \ __ext4_error_inode(inode, "", 0, block, 0, " "); \
} while (0)
#define ext4_error_inode_err(inode, func, line, block, err, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
__ext4_error_inode(inode, "", 0, block, err, " "); \
} while (0) } while (0)
#define ext4_error_file(file, func, line, block, fmt, ...) \ #define ext4_error_file(file, func, line, block, fmt, ...) \
do { \ do { \
@ -2849,12 +2861,17 @@ do { \
#define ext4_error(sb, fmt, ...) \ #define ext4_error(sb, fmt, ...) \
do { \ do { \
no_printk(fmt, ##__VA_ARGS__); \ no_printk(fmt, ##__VA_ARGS__); \
__ext4_error(sb, "", 0, " "); \ __ext4_error(sb, "", 0, 0, 0, " "); \
} while (0) } while (0)
#define ext4_abort(sb, fmt, ...) \ #define ext4_error_err(sb, err, fmt, ...) \
do { \ do { \
no_printk(fmt, ##__VA_ARGS__); \ no_printk(fmt, ##__VA_ARGS__); \
__ext4_abort(sb, "", 0, " "); \ __ext4_error(sb, "", 0, err, 0, " "); \
} while (0)
#define ext4_abort(sb, err, fmt, ...) \
do { \
no_printk(fmt, ##__VA_ARGS__); \
__ext4_abort(sb, "", 0, err, " "); \
} while (0) } while (0)
#define ext4_warning(sb, fmt, ...) \ #define ext4_warning(sb, fmt, ...) \
do { \ do { \

View File

@ -80,8 +80,7 @@ static int ext4_journal_check_start(struct super_block *sb)
* take the FS itself readonly cleanly. * take the FS itself readonly cleanly.
*/ */
if (journal && is_journal_aborted(journal)) { if (journal && is_journal_aborted(journal)) {
ext4_set_errno(sb, -journal->j_errno); ext4_abort(sb, -journal->j_errno, "Detected aborted journal");
ext4_abort(sb, "Detected aborted journal");
return -EROFS; return -EROFS;
} }
return 0; return 0;
@ -272,8 +271,7 @@ int __ext4_forget(const char *where, unsigned int line, handle_t *handle,
if (err) { if (err) {
ext4_journal_abort_handle(where, line, __func__, ext4_journal_abort_handle(where, line, __func__,
bh, handle, err); bh, handle, err);
ext4_set_errno(inode->i_sb, -err); __ext4_abort(inode->i_sb, where, line, -err,
__ext4_abort(inode->i_sb, where, line,
"error %d when attempting revoke", err); "error %d when attempting revoke", err);
} }
BUFFER_TRACE(bh, "exit"); BUFFER_TRACE(bh, "exit");
@ -332,6 +330,7 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
err); err);
} }
} else { } else {
set_buffer_uptodate(bh);
if (inode) if (inode)
mark_buffer_dirty_inode(bh, inode); mark_buffer_dirty_inode(bh, inode);
else else
@ -342,11 +341,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
struct ext4_super_block *es; struct ext4_super_block *es;
es = EXT4_SB(inode->i_sb)->s_es; es = EXT4_SB(inode->i_sb)->s_es;
es->s_last_error_block = ext4_error_inode_err(inode, where, line,
cpu_to_le64(bh->b_blocknr); bh->b_blocknr, EIO,
ext4_set_errno(inode->i_sb, EIO);
ext4_error_inode(inode, where, line,
bh->b_blocknr,
"IO error syncing itable block"); "IO error syncing itable block");
err = -EIO; err = -EIO;
} }

View File

@@ -512,6 +512,9 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
 		return 0;
 	if (ext4_should_journal_data(inode))
 		return 0;
+	/* temporary fix to prevent generic/422 test failures */
+	if (!test_opt(inode->i_sb, DELALLOC))
+		return 0;
 	return 1;
 }


View File

@ -28,6 +28,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/fiemap.h> #include <linux/fiemap.h>
#include <linux/backing-dev.h> #include <linux/backing-dev.h>
#include <linux/iomap.h>
#include "ext4_jbd2.h" #include "ext4_jbd2.h"
#include "ext4_extents.h" #include "ext4_extents.h"
#include "xattr.h" #include "xattr.h"
@ -83,13 +84,6 @@ static void ext4_extent_block_csum_set(struct inode *inode,
et->et_checksum = ext4_extent_block_csum(inode, eh); et->et_checksum = ext4_extent_block_csum(inode, eh);
} }
static int ext4_split_extent(handle_t *handle,
struct inode *inode,
struct ext4_ext_path **ppath,
struct ext4_map_blocks *map,
int split_flag,
int flags);
static int ext4_split_extent_at(handle_t *handle, static int ext4_split_extent_at(handle_t *handle,
struct inode *inode, struct inode *inode,
struct ext4_ext_path **ppath, struct ext4_ext_path **ppath,
@ -97,9 +91,6 @@ static int ext4_split_extent_at(handle_t *handle,
int split_flag, int split_flag,
int flags); int flags);
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes);
static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped) static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
{ {
/* /*
@ -358,8 +349,8 @@ static int ext4_valid_extent_idx(struct inode *inode,
} }
static int ext4_valid_extent_entries(struct inode *inode, static int ext4_valid_extent_entries(struct inode *inode,
struct ext4_extent_header *eh, struct ext4_extent_header *eh,
int depth) ext4_fsblk_t *pblk, int depth)
{ {
unsigned short entries; unsigned short entries;
if (eh->eh_entries == 0) if (eh->eh_entries == 0)
@ -370,8 +361,6 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) { if (depth == 0) {
/* leaf entries */ /* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
ext4_fsblk_t pblock = 0;
ext4_lblk_t lblock = 0; ext4_lblk_t lblock = 0;
ext4_lblk_t prev = 0; ext4_lblk_t prev = 0;
int len = 0; int len = 0;
@ -383,8 +372,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
lblock = le32_to_cpu(ext->ee_block); lblock = le32_to_cpu(ext->ee_block);
len = ext4_ext_get_actual_len(ext); len = ext4_ext_get_actual_len(ext);
if ((lblock <= prev) && prev) { if ((lblock <= prev) && prev) {
pblock = ext4_ext_pblock(ext); *pblk = ext4_ext_pblock(ext);
es->s_last_error_block = cpu_to_le64(pblock);
return 0; return 0;
} }
ext++; ext++;
@ -431,7 +419,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid eh_entries"; error_msg = "invalid eh_entries";
goto corrupted; goto corrupted;
} }
if (!ext4_valid_extent_entries(inode, eh, depth)) { if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
error_msg = "invalid extent entries"; error_msg = "invalid extent entries";
goto corrupted; goto corrupted;
} }
@ -449,14 +437,14 @@ static int __ext4_ext_check(const char *function, unsigned int line,
return 0; return 0;
corrupted: corrupted:
ext4_set_errno(inode->i_sb, -err); ext4_error_inode_err(inode, function, line, 0, -err,
ext4_error_inode(inode, function, line, 0, "pblk %llu bad header/extent: %s - magic %x, "
"pblk %llu bad header/extent: %s - magic %x, " "entries %u, max %u(%u), depth %u(%u)",
"entries %u, max %u(%u), depth %u(%u)", (unsigned long long) pblk, error_msg,
(unsigned long long) pblk, error_msg, le16_to_cpu(eh->eh_magic),
le16_to_cpu(eh->eh_magic), le16_to_cpu(eh->eh_entries),
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max), le16_to_cpu(eh->eh_max),
max, le16_to_cpu(eh->eh_depth), depth); max, le16_to_cpu(eh->eh_depth), depth);
return err; return err;
} }
@ -556,6 +544,12 @@ int ext4_ext_precache(struct inode *inode)
down_read(&ei->i_data_sem); down_read(&ei->i_data_sem);
depth = ext_depth(inode); depth = ext_depth(inode);
/* Don't cache anything if there are no external extent blocks */
if (!depth) {
up_read(&ei->i_data_sem);
return ret;
}
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path), path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
GFP_NOFS); GFP_NOFS);
if (path == NULL) { if (path == NULL) {
@ -563,9 +557,6 @@ int ext4_ext_precache(struct inode *inode)
return -ENOMEM; return -ENOMEM;
} }
/* Don't cache anything if there are no external extent blocks */
if (depth == 0)
goto out;
path[0].p_hdr = ext_inode_hdr(inode); path[0].p_hdr = ext_inode_hdr(inode);
ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0); ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
if (ret) if (ret)
@ -2134,155 +2125,6 @@ cleanup:
return err; return err;
} }
static int ext4_fill_fiemap_extents(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo)
{
struct ext4_ext_path *path = NULL;
struct ext4_extent *ex;
struct extent_status es;
ext4_lblk_t next, next_del, start = 0, end = 0;
ext4_lblk_t last = block + num;
int exists, depth = 0, err = 0;
unsigned int flags = 0;
unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
while (block < last && block != EXT_MAX_BLOCKS) {
num = last - block;
/* find extent for this block */
down_read(&EXT4_I(inode)->i_data_sem);
path = ext4_find_extent(inode, block, &path, 0);
if (IS_ERR(path)) {
up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
path = NULL;
break;
}
depth = ext_depth(inode);
if (unlikely(path[depth].p_hdr == NULL)) {
up_read(&EXT4_I(inode)->i_data_sem);
EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
err = -EFSCORRUPTED;
break;
}
ex = path[depth].p_ext;
next = ext4_ext_next_allocated_block(path);
flags = 0;
exists = 0;
if (!ex) {
/* there is no extent yet, so try to allocate
* all requested space */
start = block;
end = block + num;
} else if (le32_to_cpu(ex->ee_block) > block) {
/* need to allocate space before found extent */
start = block;
end = le32_to_cpu(ex->ee_block);
if (block + num < end)
end = block + num;
} else if (block >= le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex)) {
/* need to allocate space after found extent */
start = block;
end = block + num;
if (end >= next)
end = next;
} else if (block >= le32_to_cpu(ex->ee_block)) {
/*
* some part of requested space is covered
* by found extent
*/
start = block;
end = le32_to_cpu(ex->ee_block)
+ ext4_ext_get_actual_len(ex);
if (block + num < end)
end = block + num;
exists = 1;
} else {
BUG();
}
BUG_ON(end <= start);
if (!exists) {
es.es_lblk = start;
es.es_len = end - start;
es.es_pblk = 0;
} else {
es.es_lblk = le32_to_cpu(ex->ee_block);
es.es_len = ext4_ext_get_actual_len(ex);
es.es_pblk = ext4_ext_pblock(ex);
if (ext4_ext_is_unwritten(ex))
flags |= FIEMAP_EXTENT_UNWRITTEN;
}
/*
* Find delayed extent and update es accordingly. We call
* it even in !exists case to find out whether es is the
* last existing extent or not.
*/
next_del = ext4_find_delayed_extent(inode, &es);
if (!exists && next_del) {
exists = 1;
flags |= (FIEMAP_EXTENT_DELALLOC |
FIEMAP_EXTENT_UNKNOWN);
}
up_read(&EXT4_I(inode)->i_data_sem);
if (unlikely(es.es_len == 0)) {
EXT4_ERROR_INODE(inode, "es.es_len == 0");
err = -EFSCORRUPTED;
break;
}
/*
* This is possible iff next == next_del == EXT_MAX_BLOCKS.
* we need to check next == EXT_MAX_BLOCKS because it is
* possible that an extent is with unwritten and delayed
* status due to when an extent is delayed allocated and
* is allocated by fallocate status tree will track both of
* them in a extent.
*
* So we could return a unwritten and delayed extent, and
* its block is equal to 'next'.
*/
if (next == next_del && next == EXT_MAX_BLOCKS) {
flags |= FIEMAP_EXTENT_LAST;
if (unlikely(next_del != EXT_MAX_BLOCKS ||
next != EXT_MAX_BLOCKS)) {
EXT4_ERROR_INODE(inode,
"next extent == %u, next "
"delalloc extent = %u",
next, next_del);
err = -EFSCORRUPTED;
break;
}
}
if (exists) {
err = fiemap_fill_next_extent(fieinfo,
(__u64)es.es_lblk << blksize_bits,
(__u64)es.es_pblk << blksize_bits,
(__u64)es.es_len << blksize_bits,
flags);
if (err < 0)
break;
if (err == 1) {
err = 0;
break;
}
}
block = es.es_lblk + es.es_len;
}
ext4_ext_drop_refs(path);
kfree(path);
return err;
}
static int ext4_fill_es_cache_info(struct inode *inode, static int ext4_fill_es_cache_info(struct inode *inode,
ext4_lblk_t block, ext4_lblk_t num, ext4_lblk_t block, ext4_lblk_t num,
struct fiemap_extent_info *fieinfo) struct fiemap_extent_info *fieinfo)
@ -3874,64 +3716,11 @@ out:
return err; return err;
} }
/*
* Handle EOFBLOCKS_FL flag, clearing it if necessary
*/
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
ext4_lblk_t lblk,
struct ext4_ext_path *path,
unsigned int len)
{
int i, depth;
struct ext4_extent_header *eh;
struct ext4_extent *last_ex;
if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
return 0;
depth = ext_depth(inode);
eh = path[depth].p_hdr;
/*
* We're going to remove EOFBLOCKS_FL entirely in future so we
* do not care for this case anymore. Simply remove the flag
* if there are no extents.
*/
if (unlikely(!eh->eh_entries))
goto out;
last_ex = EXT_LAST_EXTENT(eh);
/*
* We should clear the EOFBLOCKS_FL flag if we are writing the
* last block in the last extent in the file. We test this by
* first checking to see if the caller to
* ext4_ext_get_blocks() was interested in the last block (or
* a block beyond the last block) in the current extent. If
* this turns out to be false, we can bail out from this
* function immediately.
*/
if (lblk + len < le32_to_cpu(last_ex->ee_block) +
ext4_ext_get_actual_len(last_ex))
return 0;
/*
* If the caller does appear to be planning to write at or
* beyond the end of the current extent, we then test to see
* if the current extent is the last extent in the file, by
* checking to make sure it was reached via the rightmost node
* at each level of the tree.
*/
for (i = depth-1; i >= 0; i--)
if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
return 0;
out:
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
return ext4_mark_inode_dirty(handle, inode);
}
static int static int
convert_initialized_extent(handle_t *handle, struct inode *inode, convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, struct ext4_ext_path **ppath,
unsigned int allocated) unsigned int *allocated)
{ {
struct ext4_ext_path *path = *ppath; struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex; struct ext4_extent *ex;
@ -3991,14 +3780,12 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
ext4_ext_show_leaf(inode, path); ext4_ext_show_leaf(inode, path);
ext4_update_inode_fsync_trans(handle, inode, 1); ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
if (err)
return err;
map->m_flags |= EXT4_MAP_UNWRITTEN; map->m_flags |= EXT4_MAP_UNWRITTEN;
if (allocated > map->m_len) if (*allocated > map->m_len)
allocated = map->m_len; *allocated = map->m_len;
map->m_len = allocated; map->m_len = *allocated;
return allocated; return 0;
} }
static int static int
@ -4007,7 +3794,9 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, int flags, struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock) unsigned int allocated, ext4_fsblk_t newblock)
{ {
#ifdef EXT_DEBUG
struct ext4_ext_path *path = *ppath; struct ext4_ext_path *path = *ppath;
#endif
int ret = 0; int ret = 0;
int err = 0; int err = 0;
@ -4047,11 +3836,9 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
} }
ret = ext4_convert_unwritten_extents_endio(handle, inode, map, ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath); ppath);
if (ret >= 0) { if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1); ext4_update_inode_fsync_trans(handle, inode, 1);
err = check_eofblocks_fl(handle, inode, map->m_lblk, else
path, map->m_len);
} else
err = ret; err = ret;
map->m_flags |= EXT4_MAP_MAPPED; map->m_flags |= EXT4_MAP_MAPPED;
map->m_pblk = newblock; map->m_pblk = newblock;
@ -4100,12 +3887,6 @@ out:
map_out: map_out:
map->m_flags |= EXT4_MAP_MAPPED; map->m_flags |= EXT4_MAP_MAPPED;
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
map->m_len);
if (err < 0)
goto out2;
}
out1: out1:
if (allocated > map->m_len) if (allocated > map->m_len)
allocated = map->m_len; allocated = map->m_len;
@ -4244,12 +4025,11 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_extent newex, *ex, *ex2; struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_fsblk_t newblock = 0; ext4_fsblk_t newblock = 0;
int free_on_err = 0, err = 0, depth, ret; int err = 0, depth, ret;
unsigned int allocated = 0, offset = 0; unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0; unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar; struct ext4_allocation_request ar;
ext4_lblk_t cluster_offset; ext4_lblk_t cluster_offset;
bool map_from_cluster = false;
ext_debug("blocks %u/%u requested for inode %lu\n", ext_debug("blocks %u/%u requested for inode %lu\n",
map->m_lblk, map->m_len, inode->i_ino); map->m_lblk, map->m_len, inode->i_ino);
@ -4308,12 +4088,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/ */
if ((!ext4_ext_is_unwritten(ex)) && if ((!ext4_ext_is_unwritten(ex)) &&
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent( err = convert_initialized_extent(handle,
handle, inode, map, &path, inode, map, &path, &allocated);
allocated);
goto out2; goto out2;
} else if (!ext4_ext_is_unwritten(ex)) } else if (!ext4_ext_is_unwritten(ex)) {
goto out; goto out;
}
ret = ext4_ext_handle_unwritten_extents( ret = ext4_ext_handle_unwritten_extents(
handle, inode, map, &path, flags, handle, inode, map, &path, flags,
@ -4364,7 +4144,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
ar.len = allocated = map->m_len; ar.len = allocated = map->m_len;
newblock = map->m_pblk; newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks; goto got_allocated_blocks;
} }
@ -4385,7 +4164,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
ar.len = allocated = map->m_len; ar.len = allocated = map->m_len;
newblock = map->m_pblk; newblock = map->m_pblk;
map_from_cluster = true;
goto got_allocated_blocks; goto got_allocated_blocks;
} }
@ -4442,7 +4220,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
goto out2; goto out2;
ext_debug("allocate new block: goal %llu, found %llu/%u\n", ext_debug("allocate new block: goal %llu, found %llu/%u\n",
ar.goal, newblock, allocated); ar.goal, newblock, allocated);
free_on_err = 1;
allocated_clusters = ar.len; allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset; ar.len = EXT4_C2B(sbi, ar.len) - offset;
if (ar.len > allocated) if (ar.len > allocated)
@ -4453,28 +4230,28 @@ got_allocated_blocks:
ext4_ext_store_pblock(&newex, newblock + offset); ext4_ext_store_pblock(&newex, newblock + offset);
newex.ee_len = cpu_to_le16(ar.len); newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */ /* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){ if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
ext4_ext_mark_unwritten(&newex); ext4_ext_mark_unwritten(&newex);
map->m_flags |= EXT4_MAP_UNWRITTEN; map->m_flags |= EXT4_MAP_UNWRITTEN;
} }
err = 0; err = ext4_ext_insert_extent(handle, inode, &path, &newex, flags);
if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) if (err) {
err = check_eofblocks_fl(handle, inode, map->m_lblk, if (allocated_clusters) {
path, ar.len); int fb_flags = 0;
if (!err)
err = ext4_ext_insert_extent(handle, inode, &path,
&newex, flags);
if (err && free_on_err) { /*
int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ? * free data blocks we just allocated.
EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0; * not a good idea to call discard here directly,
/* free data blocks we just allocated */ * but otherwise we'd need to call it every free().
/* not a good idea to call discard here directly, */
* but otherwise we'd need to call it every free() */ ext4_discard_preallocations(inode);
ext4_discard_preallocations(inode); if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
ext4_free_blocks(handle, inode, NULL, newblock, fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE;
EXT4_C2B(sbi, allocated_clusters), fb_flags); ext4_free_blocks(handle, inode, NULL, newblock,
EXT4_C2B(sbi, allocated_clusters),
fb_flags);
}
goto out2; goto out2;
} }
@ -4491,7 +4268,7 @@ got_allocated_blocks:
* clusters discovered to be delayed allocated. Once allocated, a * clusters discovered to be delayed allocated. Once allocated, a
* cluster is not included in the reserved count. * cluster is not included in the reserved count.
*/ */
if (test_opt(inode->i_sb, DELALLOC) && !map_from_cluster) { if (test_opt(inode->i_sb, DELALLOC) && allocated_clusters) {
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) { if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
/* /*
* When allocating delayed allocated clusters, simply * When allocating delayed allocated clusters, simply
@ -4645,10 +4422,6 @@ retry:
epos = new_size; epos = new_size;
if (ext4_update_inode_size(inode, epos) & 0x1) if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime; inode->i_mtime = inode->i_ctime;
} else {
if (epos > inode->i_size)
ext4_set_inode_flag(inode,
EXT4_INODE_EOFBLOCKS);
} }
ext4_mark_inode_dirty(handle, inode); ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1); ext4_update_inode_fsync_trans(handle, inode, 1);
@ -4802,16 +4575,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
} }
inode->i_mtime = inode->i_ctime = current_time(inode); inode->i_mtime = inode->i_ctime = current_time(inode);
if (new_size) { if (new_size)
ext4_update_inode_size(inode, new_size); ext4_update_inode_size(inode, new_size);
} else {
/*
* Mark that we allocate beyond EOF so the subsequent truncate
* can proceed even if the new size is the same as i_size.
*/
if (offset + len > inode->i_size)
ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
}
ext4_mark_inode_dirty(handle, inode); ext4_mark_inode_dirty(handle, inode);
/* Zero out partial block at the edges of the range */ /* Zero out partial block at the edges of the range */
@ -5009,64 +4774,13 @@ int ext4_convert_unwritten_io_end_vec(handle_t *handle, ext4_io_end_t *io_end)
return ret < 0 ? ret : err; return ret < 0 ? ret : err;
} }
/* static int ext4_iomap_xattr_fiemap(struct inode *inode, struct iomap *iomap)
* If newes is not existing extent (newes->ec_pblk equals zero) find
* delayed extent at start of newes and update newes accordingly and
* return start of the next delayed extent.
*
* If newes is existing extent (newes->ec_pblk is not equal zero)
* return start of next delayed extent or EXT_MAX_BLOCKS if no delayed
* extent found. Leave newes unmodified.
*/
static int ext4_find_delayed_extent(struct inode *inode,
struct extent_status *newes)
{
struct extent_status es;
ext4_lblk_t block, next_del;
if (newes->es_pblk == 0) {
ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
newes->es_lblk,
newes->es_lblk + newes->es_len - 1,
&es);
/*
* No extent in extent-tree contains block @newes->es_pblk,
* then the block may stay in 1)a hole or 2)delayed-extent.
*/
if (es.es_len == 0)
/* A hole found. */
return 0;
if (es.es_lblk > newes->es_lblk) {
/* A hole found. */
newes->es_len = min(es.es_lblk - newes->es_lblk,
newes->es_len);
return 0;
}
newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
}
block = newes->es_lblk + newes->es_len;
ext4_es_find_extent_range(inode, &ext4_es_is_delayed, block,
EXT_MAX_BLOCKS, &es);
if (es.es_len == 0)
next_del = EXT_MAX_BLOCKS;
else
next_del = es.es_lblk;
return next_del;
}
static int ext4_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{ {
__u64 physical = 0; __u64 physical = 0;
__u64 length; __u64 length = 0;
__u32 flags = FIEMAP_EXTENT_LAST;
int blockbits = inode->i_sb->s_blocksize_bits; int blockbits = inode->i_sb->s_blocksize_bits;
int error = 0; int error = 0;
u16 iomap_type;
/* in-inode? */ /* in-inode? */
if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
@ -5081,41 +4795,50 @@ static int ext4_xattr_fiemap(struct inode *inode,
EXT4_I(inode)->i_extra_isize; EXT4_I(inode)->i_extra_isize;
physical += offset; physical += offset;
length = EXT4_SB(inode->i_sb)->s_inode_size - offset; length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
flags |= FIEMAP_EXTENT_DATA_INLINE;
brelse(iloc.bh); brelse(iloc.bh);
} else { /* external block */ iomap_type = IOMAP_INLINE;
} else if (EXT4_I(inode)->i_file_acl) { /* external block */
physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
length = inode->i_sb->s_blocksize; length = inode->i_sb->s_blocksize;
iomap_type = IOMAP_MAPPED;
} else {
/* no in-inode or external block for xattr, so return -ENOENT */
error = -ENOENT;
goto out;
} }
if (physical) iomap->addr = physical;
error = fiemap_fill_next_extent(fieinfo, 0, physical, iomap->offset = 0;
length, flags); iomap->length = length;
return (error < 0 ? error : 0); iomap->type = iomap_type;
iomap->flags = 0;
out:
return error;
} }
static int _ext4_fiemap(struct inode *inode, static int ext4_iomap_xattr_begin(struct inode *inode, loff_t offset,
struct fiemap_extent_info *fieinfo, loff_t length, unsigned flags,
__u64 start, __u64 len, struct iomap *iomap, struct iomap *srcmap)
int (*fill)(struct inode *, ext4_lblk_t, {
ext4_lblk_t, int error;
struct fiemap_extent_info *))
error = ext4_iomap_xattr_fiemap(inode, iomap);
if (error == 0 && (offset >= iomap->length))
error = -ENOENT;
return error;
}
static const struct iomap_ops ext4_iomap_xattr_ops = {
.iomap_begin = ext4_iomap_xattr_begin,
};
static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, bool from_es_cache)
{ {
ext4_lblk_t start_blk; ext4_lblk_t start_blk;
u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR; u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR;
int error = 0; int error = 0;
if (ext4_has_inline_data(inode)) {
int has_inline = 1;
error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
start, len);
if (has_inline)
return error;
}
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) { if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
error = ext4_ext_precache(inode); error = ext4_ext_precache(inode);
if (error) if (error)
@ -5123,19 +4846,19 @@ static int _ext4_fiemap(struct inode *inode,
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE; fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
} }
/* fallback to generic here if not in extents fmt */ if (from_es_cache)
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
fill == ext4_fill_fiemap_extents)
return generic_block_fiemap(inode, fieinfo, start, len,
ext4_get_block);
if (fill == ext4_fill_es_cache_info)
ext4_fiemap_flags &= FIEMAP_FLAG_XATTR; ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
if (fiemap_check_flags(fieinfo, ext4_fiemap_flags)) if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
return -EBADR; return -EBADR;
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
error = ext4_xattr_fiemap(inode, fieinfo); fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
error = iomap_fiemap(inode, fieinfo, start, len,
&ext4_iomap_xattr_ops);
} else if (!from_es_cache) {
error = iomap_fiemap(inode, fieinfo, start, len,
&ext4_iomap_report_ops);
} else { } else {
ext4_lblk_t len_blks; ext4_lblk_t len_blks;
__u64 last_blk; __u64 last_blk;
@ -5150,7 +4873,8 @@ static int _ext4_fiemap(struct inode *inode,
* Walk the extent tree gathering extent information * Walk the extent tree gathering extent information
* and pushing extents back to the user. * and pushing extents back to the user.
*/ */
error = fill(inode, start_blk, len_blks, fieinfo); error = ext4_fill_es_cache_info(inode, start_blk, len_blks,
fieinfo);
} }
return error; return error;
} }
@ -5158,8 +4882,7 @@ static int _ext4_fiemap(struct inode *inode,
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len) __u64 start, __u64 len)
{ {
return _ext4_fiemap(inode, fieinfo, start, len, return _ext4_fiemap(inode, fieinfo, start, len, false);
ext4_fill_fiemap_extents);
} }
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo, int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
@ -5175,8 +4898,7 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
return 0; return 0;
} }
return _ext4_fiemap(inode, fieinfo, start, len, return _ext4_fiemap(inode, fieinfo, start, len, true);
ext4_fill_es_cache_info);
} }

View File

@@ -872,6 +872,7 @@ const struct file_operations ext4_file_operations = {
 	.llseek		= ext4_llseek,
 	.read_iter	= ext4_file_read_iter,
 	.write_iter	= ext4_file_write_iter,
+	.iopoll		= iomap_dio_iopoll,
 	.unlocked_ioctl	= ext4_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl	= ext4_compat_ioctl,
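
With ->iopoll wired to iomap_dio_iopoll(), polled I/O (preadv2/pwritev2 with RWF_HIPRI, or io_uring rings created with IORING_SETUP_IOPOLL) can busy-poll for O_DIRECT completions on ext4 instead of sleeping on an interrupt. A hedged userspace sketch, assuming an O_DIRECT file descriptor and a buffer aligned for direct I/O; the helper name is illustrative.

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/uio.h>

/* Issue one polled O_DIRECT read at the given offset. */
static ssize_t polled_read(int fd, void *aligned_buf, size_t len, off_t off)
{
	struct iovec iov = { .iov_base = aligned_buf, .iov_len = len };

	/* RWF_HIPRI requests completion polling; fd must be opened O_DIRECT. */
	return preadv2(fd, &iov, 1, off, RWF_HIPRI);
}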

View File

@@ -196,10 +196,9 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
 	if (!buffer_uptodate(bh)) {
 		put_bh(bh);
-		ext4_set_errno(sb, EIO);
-		ext4_error(sb, "Cannot read inode bitmap - "
-			   "block_group = %u, inode_bitmap = %llu",
-			   block_group, bitmap_blk);
+		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
+			       "block_group = %u, inode_bitmap = %llu",
+			       block_group, bitmap_blk);
 		ext4_mark_group_bitmap_corrupted(sb, block_group,
 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
 		return ERR_PTR(-EIO);
@ -712,21 +711,34 @@ out:
static int find_inode_bit(struct super_block *sb, ext4_group_t group, static int find_inode_bit(struct super_block *sb, ext4_group_t group,
struct buffer_head *bitmap, unsigned long *ino) struct buffer_head *bitmap, unsigned long *ino)
{ {
bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);
next: next:
*ino = ext4_find_next_zero_bit((unsigned long *) *ino = ext4_find_next_zero_bit((unsigned long *)
bitmap->b_data, bitmap->b_data,
EXT4_INODES_PER_GROUP(sb), *ino); EXT4_INODES_PER_GROUP(sb), *ino);
if (*ino >= EXT4_INODES_PER_GROUP(sb)) if (*ino >= EXT4_INODES_PER_GROUP(sb))
return 0; goto not_found;
if ((EXT4_SB(sb)->s_journal == NULL) && if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
recently_deleted(sb, group, *ino)) { recently_deleted_ino = *ino;
*ino = *ino + 1; *ino = *ino + 1;
if (*ino < EXT4_INODES_PER_GROUP(sb)) if (*ino < EXT4_INODES_PER_GROUP(sb))
goto next; goto next;
return 0; goto not_found;
} }
return 1;
not_found:
if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
return 0;
/*
* Not reusing recently deleted inodes is mostly a preference. We don't
* want to report ENOSPC or skew allocation patterns because of that.
* So return even recently deleted inode if we could find better in the
* given range.
*/
*ino = recently_deleted_ino;
return 1; return 1;
} }
@ -1231,9 +1243,9 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) { if (IS_ERR(inode)) {
err = PTR_ERR(inode); err = PTR_ERR(inode);
ext4_set_errno(sb, -err); ext4_error_err(sb, -err,
ext4_error(sb, "couldn't read orphan inode %lu (err %d)", "couldn't read orphan inode %lu (err %d)",
ino, err); ino, err);
return inode; return inode;
} }

View File

@@ -1019,7 +1019,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			 * (should be rare).
 			 */
 			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, nr,
+				ext4_error_inode_block(inode, nr, EIO,
						       "Read failure");
 				continue;
 			}

View File

@ -98,10 +98,9 @@ int ext4_get_max_inline_size(struct inode *inode)
error = ext4_get_inode_loc(inode, &iloc); error = ext4_get_inode_loc(inode, &iloc);
if (error) { if (error) {
ext4_set_errno(inode->i_sb, -error); ext4_error_inode_err(inode, __func__, __LINE__, 0, -error,
ext4_error_inode(inode, __func__, __LINE__, 0, "can't get inode location %lu",
"can't get inode location %lu", inode->i_ino);
inode->i_ino);
return 0; return 0;
} }
@ -1762,9 +1761,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
err = ext4_get_inode_loc(dir, &iloc); err = ext4_get_inode_loc(dir, &iloc);
if (err) { if (err) {
ext4_set_errno(dir->i_sb, -err); EXT4_ERROR_INODE_ERR(dir, -err,
EXT4_ERROR_INODE(dir, "error %d getting inode %lu block", "error %d getting inode %lu block",
err, dir->i_ino); err, dir->i_ino);
return true; return true;
} }
@ -1857,47 +1856,6 @@ out:
return error; return error;
} }
int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len)
{
__u64 physical = 0;
__u64 inline_len;
__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
FIEMAP_EXTENT_LAST;
int error = 0;
struct ext4_iloc iloc;
down_read(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
*has_inline = 0;
goto out;
}
inline_len = min_t(size_t, ext4_get_inline_size(inode),
i_size_read(inode));
if (start >= inline_len)
goto out;
if (start + len < inline_len)
inline_len = start + len;
inline_len -= start;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
goto out;
physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
if (physical)
error = fiemap_fill_next_extent(fieinfo, start, physical,
inline_len, flags);
return (error < 0 ? error : 0);
}
int ext4_inline_data_truncate(struct inode *inode, int *has_inline) int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{ {
handle_t *handle; handle_t *handle;

View File

@ -269,10 +269,9 @@ void ext4_evict_inode(struct inode *inode)
if (inode->i_blocks) { if (inode->i_blocks) {
err = ext4_truncate(inode); err = ext4_truncate(inode);
if (err) { if (err) {
ext4_set_errno(inode->i_sb, -err); ext4_error_err(inode->i_sb, -err,
ext4_error(inode->i_sb, "couldn't truncate inode %lu (err %d)",
"couldn't truncate inode %lu (err %d)", inode->i_ino, err);
inode->i_ino, err);
goto stop_handle; goto stop_handle;
} }
} }
@ -2478,10 +2477,9 @@ update_disksize:
up_write(&EXT4_I(inode)->i_data_sem); up_write(&EXT4_I(inode)->i_data_sem);
err2 = ext4_mark_inode_dirty(handle, inode); err2 = ext4_mark_inode_dirty(handle, inode);
if (err2) { if (err2) {
ext4_set_errno(inode->i_sb, -err2); ext4_error_err(inode->i_sb, -err2,
ext4_error(inode->i_sb, "Failed to mark inode %lu dirty",
"Failed to mark inode %lu dirty", inode->i_ino);
inode->i_ino);
} }
if (!err) if (!err)
err = err2; err = err2;
@ -3212,7 +3210,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
return 0; return 0;
} }
return generic_block_bmap(mapping, block, ext4_get_block); return iomap_bmap(mapping, block, &ext4_iomap_ops);
} }
static int ext4_readpage(struct file *file, struct page *page) static int ext4_readpage(struct file *file, struct page *page)
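
The ->bmap change above routes the legacy FIBMAP ioctl through iomap_bmap() and ext4_iomap_ops. As a hedged reminder of what that interface serves, here is an illustrative userspace helper (not from this series); FIBMAP needs CAP_SYS_RAWIO and speaks in filesystem-block units.

#include <sys/ioctl.h>
#include <linux/fs.h>

/* Map one logical block; returns the physical block, 0 for a hole, -1 on error. */
static long fibmap_block(int fd, unsigned int logical_block)
{
	int blk = logical_block;	/* FIBMAP takes and returns an int */

	if (ioctl(fd, FIBMAP, &blk) < 0)
		return -1;
	return blk;
}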
@ -3333,6 +3331,10 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
iomap->offset = (u64) map->m_lblk << blkbits; iomap->offset = (u64) map->m_lblk << blkbits;
iomap->length = (u64) map->m_len << blkbits; iomap->length = (u64) map->m_len << blkbits;
if ((map->m_flags & EXT4_MAP_MAPPED) &&
!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
iomap->flags |= IOMAP_F_MERGED;
/* /*
* Flags passed to ext4_map_blocks() for direct I/O writes can result * Flags passed to ext4_map_blocks() for direct I/O writes can result
* in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
@ -3542,12 +3544,28 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits, map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1; EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
/*
* Fiemap callers may call for offset beyond s_bitmap_maxbytes.
* So handle it here itself instead of querying ext4_map_blocks().
* Since ext4_map_blocks() will warn about it and will return
* -EIO error.
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
if (offset >= sbi->s_bitmap_maxbytes) {
map.m_flags = 0;
goto set_iomap;
}
}
ret = ext4_map_blocks(NULL, inode, &map, 0); ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret < 0) if (ret < 0)
return ret; return ret;
if (ret == 0) if (ret == 0)
delalloc = ext4_iomap_is_delalloc(inode, &map); delalloc = ext4_iomap_is_delalloc(inode, &map);
set_iomap:
ext4_set_iomap(inode, iomap, &map, offset, length); ext4_set_iomap(inode, iomap, &map, offset, length);
if (delalloc && iomap->type == IOMAP_HOLE) if (delalloc && iomap->type == IOMAP_HOLE)
iomap->type = IOMAP_DELALLOC; iomap->type = IOMAP_DELALLOC;
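
The s_bitmap_maxbytes check added above only applies to indirect-mapped (non-extent) inodes, whose reachable file size is bounded by the block-pointer tree. A rough, hedged sketch of where that bound comes from; the real computation in ext4_max_bitmap_size() also accounts for metadata blocks and the on-disk i_blocks representation, so treat this as an approximation only.

/*
 * Approximate largest byte offset reachable through 12 direct pointers plus
 * one single-, double- and triple-indirect block, each holding
 * blocksize / 4 block pointers.
 */
static unsigned long long approx_bitmap_maxbytes(unsigned int blocksize)
{
	unsigned long long ptrs = blocksize / 4;
	unsigned long long blocks = 12 + ptrs + ptrs * ptrs + ptrs * ptrs * ptrs;

	return blocks * blocksize;
}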
@ -4144,8 +4162,6 @@ int ext4_truncate(struct inode *inode)
if (!ext4_can_truncate(inode)) if (!ext4_can_truncate(inode))
return 0; return 0;
ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
@ -4364,8 +4380,7 @@ make_io:
wait_on_buffer(bh); wait_on_buffer(bh);
if (!buffer_uptodate(bh)) { if (!buffer_uptodate(bh)) {
simulate_eio: simulate_eio:
ext4_set_errno(inode->i_sb, EIO); ext4_error_inode_block(inode, block, EIO,
EXT4_ERROR_INODE_BLOCK(inode, block,
"unable to read itable block"); "unable to read itable block");
brelse(bh); brelse(bh);
return -EIO; return -EIO;
@ -4517,7 +4532,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
(ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) { (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
if (flags & EXT4_IGET_HANDLE) if (flags & EXT4_IGET_HANDLE)
return ERR_PTR(-ESTALE); return ERR_PTR(-ESTALE);
__ext4_error(sb, function, line, __ext4_error(sb, function, line, EFSCORRUPTED, 0,
"inode #%lu: comm %s: iget: illegal inode #", "inode #%lu: comm %s: iget: illegal inode #",
ino, current->comm); ino, current->comm);
return ERR_PTR(-EFSCORRUPTED); return ERR_PTR(-EFSCORRUPTED);
@ -4580,9 +4595,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
if (!ext4_inode_csum_verify(inode, raw_inode, ei) || if (!ext4_inode_csum_verify(inode, raw_inode, ei) ||
ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) { ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) {
ext4_set_errno(inode->i_sb, EFSBADCRC); ext4_error_inode_err(inode, function, line, 0, EFSBADCRC,
ext4_error_inode(inode, function, line, 0, "iget: checksum invalid");
"iget: checksum invalid");
ret = -EFSBADCRC; ret = -EFSBADCRC;
goto bad_inode; goto bad_inode;
} }
@ -4812,7 +4826,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode_info *ei) struct ext4_inode_info *ei)
{ {
struct inode *inode = &(ei->vfs_inode); struct inode *inode = &(ei->vfs_inode);
u64 i_blocks = inode->i_blocks; u64 i_blocks = READ_ONCE(inode->i_blocks);
struct super_block *sb = inode->i_sb; struct super_block *sb = inode->i_sb;
if (i_blocks <= ~0U) { if (i_blocks <= ~0U) {
@ -4982,7 +4996,7 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high = raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32); cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl); raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) { if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize); ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1; need_datasync = 1;
} }
@ -5131,9 +5145,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
sync_dirty_buffer(iloc.bh); sync_dirty_buffer(iloc.bh);
if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) { if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
ext4_set_errno(inode->i_sb, EIO); ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr, "IO error syncing inode");
"IO error syncing inode");
err = -EIO; err = -EIO;
} }
brelse(iloc.bh); brelse(iloc.bh);

View File

@@ -327,18 +327,6 @@ static int ext4_ioctl_setflags(struct inode *inode,
 	if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
 		migrate = 1;

-	if (flags & EXT4_EOFBLOCKS_FL) {
-		/* we don't support adding EOFBLOCKS flag */
-		if (!(oldflags & EXT4_EOFBLOCKS_FL)) {
-			err = -EOPNOTSUPP;
-			goto flags_out;
-		}
-	} else if (oldflags & EXT4_EOFBLOCKS_FL) {
-		err = ext4_truncate(inode);
-		if (err)
-			goto flags_out;
-	}
-
 	if ((flags ^ oldflags) & EXT4_CASEFOLD_FL) {
 		if (!ext4_has_feature_casefold(sb)) {
 			err = -EOPNOTSUPP;

View File

@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
 		BUG_ON(buddy == NULL);
 
 		k = mb_find_next_zero_bit(buddy, max, 0);
-		BUG_ON(k >= max);
-
+		if (k >= max) {
+			ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
+				"%d free clusters of order %d. But found 0",
+				grp->bb_counters[i], i);
+			ext4_mark_group_bitmap_corrupted(ac->ac_sb,
+					e4b->bd_group,
+					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
+			break;
+		}
 		ac->ac_found++;
 
 		ac->ac_b_ex.fe_len = 1 << i;

@@ -3914,9 +3921,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (IS_ERR(bitmap_bh)) {
 		err = PTR_ERR(bitmap_bh);
-		ext4_set_errno(sb, -err);
-		ext4_error(sb, "Error %d reading block bitmap for %u",
-			   err, group);
+		ext4_error_err(sb, -err,
+			       "Error %d reading block bitmap for %u",
+			       err, group);
 		return 0;
 	}
 
@@ -4083,18 +4090,16 @@ repeat:
 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
 					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
-			ext4_set_errno(sb, -err);
-			ext4_error(sb, "Error %d loading buddy information for %u",
-				   err, group);
+			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+				       err, group);
 			continue;
 		}
 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (IS_ERR(bitmap_bh)) {
 			err = PTR_ERR(bitmap_bh);
-			ext4_set_errno(sb, -err);
-			ext4_error(sb, "Error %d reading block bitmap for %u",
-				   err, group);
+			ext4_error_err(sb, -err, "Error %d reading block bitmap for %u",
+				       err, group);
 			ext4_mb_unload_buddy(&e4b);
 			continue;
 		}
 
@@ -4302,7 +4307,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 
 	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
-				pa_inode_list) {
+				pa_inode_list,
+				lockdep_is_held(&lg->lg_prealloc_lock)) {
 		spin_lock(&pa->pa_lock);
 		if (atomic_read(&pa->pa_count)) {
 			/*
@@ -4347,9 +4353,8 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
 		err = ext4_mb_load_buddy_gfp(sb, group, &e4b,
 					     GFP_NOFS|__GFP_NOFAIL);
 		if (err) {
-			ext4_set_errno(sb, -err);
-			ext4_error(sb, "Error %d loading buddy information for %u",
-				   err, group);
+			ext4_error_err(sb, -err, "Error %d loading buddy information for %u",
+				       err, group);
 			continue;
 		}
 		ext4_lock_group(sb, group);
@@ -4386,7 +4391,8 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
 	/* Add the prealloc space to lg */
 	spin_lock(&lg->lg_prealloc_lock);
 	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
-				pa_inode_list) {
+				pa_inode_list,
+				lockdep_is_held(&lg->lg_prealloc_lock)) {
 		spin_lock(&tmp_pa->pa_lock);
 		if (tmp_pa->pa_deleted) {
 			spin_unlock(&tmp_pa->pa_lock);

View File

@@ -175,8 +175,8 @@ static int kmmpd(void *data)
 		 */
 		if (retval) {
 			if ((failed_writes % 60) == 0) {
-				ext4_set_errno(sb, -retval);
-				ext4_error(sb, "Error writing to MMP block");
+				ext4_error_err(sb, -retval,
+					       "Error writing to MMP block");
 			}
 			failed_writes++;
 		}
@@ -208,9 +208,9 @@ static int kmmpd(void *data)
 
 			retval = read_mmp_block(sb, &bh_check, mmp_block);
 			if (retval) {
-				ext4_set_errno(sb, -retval);
-				ext4_error(sb, "error reading MMP data: %d",
-					   retval);
+				ext4_error_err(sb, -retval,
+					       "error reading MMP data: %d",
+					       retval);
 				goto exit_thread;
 			}
 
@@ -222,8 +222,7 @@ static int kmmpd(void *data)
 					     "Error while updating MMP info. "
 					     "The filesystem seems to have been"
 					     " multiply mounted.");
-				ext4_set_errno(sb, EBUSY);
-				ext4_error(sb, "abort");
+				ext4_error_err(sb, EBUSY, "abort");
 				put_bh(bh_check);
 				retval = -EBUSY;
 				goto exit_thread;

View File

@@ -422,8 +422,8 @@ repair_branches:
 					    block_len_in_page, 0, &err2);
 	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (replaced_count != block_len_in_page) {
-		EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
-				       "Unable to copy data block,"
-				       " data will be lost.");
+		ext4_error_inode_block(orig_inode, (sector_t)(orig_blk_offset),
+				       EIO, "Unable to copy data block,"
+				       " data will be lost.");
 		*err = -EIO;
 	}

View File

@@ -160,9 +160,9 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 		    !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
 			set_buffer_verified(bh);
 		else {
-			ext4_set_errno(inode->i_sb, EFSBADCRC);
-			ext4_error_inode(inode, func, line, block,
-					 "Directory index failed checksum");
+			ext4_error_inode_err(inode, func, line, block,
+					     EFSBADCRC,
+					     "Directory index failed checksum");
 			brelse(bh);
 			return ERR_PTR(-EFSBADCRC);
 		}
@@ -172,9 +172,9 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
 		    !ext4_simulate_fail(inode->i_sb, EXT4_SIM_DIRBLOCK_CRC))
 			set_buffer_verified(bh);
 		else {
-			ext4_set_errno(inode->i_sb, EFSBADCRC);
-			ext4_error_inode(inode, func, line, block,
-					 "Directory block failed checksum");
+			ext4_error_inode_err(inode, func, line, block,
+					     EFSBADCRC,
+					     "Directory block failed checksum");
 			brelse(bh);
 			return ERR_PTR(-EFSBADCRC);
 		}
@@ -233,13 +233,13 @@ struct dx_root
 		u8 unused_flags;
 	}
 	info;
-	struct dx_entry	entries[0];
+	struct dx_entry	entries[];
 };
 
 struct dx_node
 {
 	struct fake_dirent fake;
-	struct dx_entry	entries[0];
+	struct dx_entry	entries[];
 };
 
@@ -1532,9 +1532,9 @@ restart:
 				goto next;
 			wait_on_buffer(bh);
 			if (!buffer_uptodate(bh)) {
-				ext4_set_errno(sb, EIO);
-				EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
-						 (unsigned long) block);
+				EXT4_ERROR_INODE_ERR(dir, EIO,
+						     "reading directory lblock %lu",
+						     (unsigned long) block);
 				brelse(bh);
 				ret = ERR_PTR(-EIO);
 				goto cleanup_and_exit;
@@ -1543,9 +1543,9 @@ restart:
 			    !is_dx_internal_node(dir, block,
 						 (struct ext4_dir_entry *)bh->b_data) &&
 			    !ext4_dirblock_csum_verify(dir, bh)) {
-				ext4_set_errno(sb, EFSBADCRC);
-				EXT4_ERROR_INODE(dir, "checksumming directory "
-						 "block %lu", (unsigned long)block);
+				EXT4_ERROR_INODE_ERR(dir, EFSBADCRC,
+						     "checksumming directory "
+						     "block %lu", (unsigned long)block);
 				brelse(bh);
 				ret = ERR_PTR(-EFSBADCRC);
 				goto cleanup_and_exit;

View File

@@ -335,10 +335,12 @@ static time64_t __ext4_get_tstamp(__le32 *lo, __u8 *hi)
 #define ext4_get_tstamp(es, tstamp) \
 	__ext4_get_tstamp(&(es)->tstamp, &(es)->tstamp ## _hi)
 
-static void __save_error_info(struct super_block *sb, const char *func,
-			      unsigned int line)
+static void __save_error_info(struct super_block *sb, int error,
+			      __u32 ino, __u64 block,
+			      const char *func, unsigned int line)
 {
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+	int err;
 
 	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
 	if (bdev_read_only(sb->s_bdev))
@@ -347,8 +349,62 @@ static void __save_error_info(struct super_block *sb, const char *func,
 	ext4_update_tstamp(es, s_last_error_time);
 	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
 	es->s_last_error_line = cpu_to_le32(line);
-	if (es->s_last_error_errcode == 0)
-		es->s_last_error_errcode = EXT4_ERR_EFSCORRUPTED;
+	es->s_last_error_ino = cpu_to_le32(ino);
+	es->s_last_error_block = cpu_to_le64(block);
+	switch (error) {
+	case EIO:
+		err = EXT4_ERR_EIO;
+		break;
+	case ENOMEM:
+		err = EXT4_ERR_ENOMEM;
+		break;
+	case EFSBADCRC:
+		err = EXT4_ERR_EFSBADCRC;
+		break;
+	case 0:
+	case EFSCORRUPTED:
+		err = EXT4_ERR_EFSCORRUPTED;
+		break;
+	case ENOSPC:
+		err = EXT4_ERR_ENOSPC;
+		break;
+	case ENOKEY:
+		err = EXT4_ERR_ENOKEY;
+		break;
+	case EROFS:
+		err = EXT4_ERR_EROFS;
+		break;
+	case EFBIG:
+		err = EXT4_ERR_EFBIG;
+		break;
+	case EEXIST:
+		err = EXT4_ERR_EEXIST;
+		break;
+	case ERANGE:
+		err = EXT4_ERR_ERANGE;
+		break;
+	case EOVERFLOW:
+		err = EXT4_ERR_EOVERFLOW;
+		break;
+	case EBUSY:
+		err = EXT4_ERR_EBUSY;
+		break;
+	case ENOTDIR:
+		err = EXT4_ERR_ENOTDIR;
+		break;
+	case ENOTEMPTY:
+		err = EXT4_ERR_ENOTEMPTY;
+		break;
+	case ESHUTDOWN:
+		err = EXT4_ERR_ESHUTDOWN;
+		break;
+	case EFAULT:
+		err = EXT4_ERR_EFAULT;
+		break;
+	default:
+		err = EXT4_ERR_UNKNOWN;
+	}
+	es->s_last_error_errcode = err;
 	if (!es->s_first_error_time) {
 		es->s_first_error_time = es->s_last_error_time;
 		es->s_first_error_time_hi = es->s_last_error_time_hi;
@@ -368,11 +424,13 @@ static void __save_error_info(struct super_block *sb, const char *func,
 	le32_add_cpu(&es->s_error_count, 1);
 }
 
-static void save_error_info(struct super_block *sb, const char *func,
-			    unsigned int line)
+static void save_error_info(struct super_block *sb, int error,
+			    __u32 ino, __u64 block,
+			    const char *func, unsigned int line)
 {
-	__save_error_info(sb, func, line);
-	ext4_commit_super(sb, 1);
+	__save_error_info(sb, error, ino, block, func, line);
+	if (!bdev_read_only(sb->s_bdev))
+		ext4_commit_super(sb, 1);
 }
 
 /*
@@ -477,7 +535,8 @@ static void ext4_handle_error(struct super_block *sb)
 			     "EXT4-fs error")
 
 void __ext4_error(struct super_block *sb, const char *function,
-		  unsigned int line, const char *fmt, ...)
+		  unsigned int line, int error, __u64 block,
+		  const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
@@ -495,24 +554,21 @@ void __ext4_error(struct super_block *sb, const char *function,
 		       sb->s_id, function, line, current->comm, &vaf);
 		va_end(args);
 	}
-	save_error_info(sb, function, line);
+	save_error_info(sb, error, 0, block, function, line);
 	ext4_handle_error(sb);
 }
 
 void __ext4_error_inode(struct inode *inode, const char *function,
-			unsigned int line, ext4_fsblk_t block,
+			unsigned int line, ext4_fsblk_t block, int error,
 			const char *fmt, ...)
 {
 	va_list args;
 	struct va_format vaf;
-	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return;
 
 	trace_ext4_error(inode->i_sb, function, line);
-	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
-	es->s_last_error_block = cpu_to_le64(block);
 	if (ext4_error_ratelimit(inode->i_sb)) {
 		va_start(args, fmt);
 		vaf.fmt = fmt;
@@ -529,7 +585,8 @@ void __ext4_error_inode(struct inode *inode, const char *function,
 			       current->comm, &vaf);
 		va_end(args);
 	}
-	save_error_info(inode->i_sb, function, line);
+	save_error_info(inode->i_sb, error, inode->i_ino, block,
+			function, line);
 	ext4_handle_error(inode->i_sb);
 }
 
@@ -548,7 +605,6 @@ void __ext4_error_file(struct file *file, const char *function,
 
 	trace_ext4_error(inode->i_sb, function, line);
 	es = EXT4_SB(inode->i_sb)->s_es;
-	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	if (ext4_error_ratelimit(inode->i_sb)) {
 		path = file_path(file, pathname, sizeof(pathname));
 		if (IS_ERR(path))
@@ -570,7 +626,8 @@ void __ext4_error_file(struct file *file, const char *function,
 			       current->comm, path, &vaf);
 		va_end(args);
 	}
-	save_error_info(inode->i_sb, function, line);
+	save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
+			function, line);
 	ext4_handle_error(inode->i_sb);
 }
@@ -614,66 +671,6 @@ const char *ext4_decode_error(struct super_block *sb, int errno,
 	return errstr;
 }
 
-void ext4_set_errno(struct super_block *sb, int err)
-{
-	if (err < 0)
-		err = -err;
-
-	switch (err) {
-	case EIO:
-		err = EXT4_ERR_EIO;
-		break;
-	case ENOMEM:
-		err = EXT4_ERR_ENOMEM;
-		break;
-	case EFSBADCRC:
-		err = EXT4_ERR_EFSBADCRC;
-		break;
-	case EFSCORRUPTED:
-		err = EXT4_ERR_EFSCORRUPTED;
-		break;
-	case ENOSPC:
-		err = EXT4_ERR_ENOSPC;
-		break;
-	case ENOKEY:
-		err = EXT4_ERR_ENOKEY;
-		break;
-	case EROFS:
-		err = EXT4_ERR_EROFS;
-		break;
-	case EFBIG:
-		err = EXT4_ERR_EFBIG;
-		break;
-	case EEXIST:
-		err = EXT4_ERR_EEXIST;
-		break;
-	case ERANGE:
-		err = EXT4_ERR_ERANGE;
-		break;
-	case EOVERFLOW:
-		err = EXT4_ERR_EOVERFLOW;
-		break;
-	case EBUSY:
-		err = EXT4_ERR_EBUSY;
-		break;
-	case ENOTDIR:
-		err = EXT4_ERR_ENOTDIR;
-		break;
-	case ENOTEMPTY:
-		err = EXT4_ERR_ENOTEMPTY;
-		break;
-	case ESHUTDOWN:
-		err = EXT4_ERR_ESHUTDOWN;
-		break;
-	case EFAULT:
-		err = EXT4_ERR_EFAULT;
-		break;
-	default:
-		err = EXT4_ERR_UNKNOWN;
-	}
-	EXT4_SB(sb)->s_es->s_last_error_errcode = err;
-}
-
 /* __ext4_std_error decodes expected errors from journaling functions
  * automatically and invokes the appropriate error response. */
@@ -698,8 +695,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
 			       sb->s_id, function, line, errstr);
 	}
 
-	ext4_set_errno(sb, -errno);
-	save_error_info(sb, function, line);
+	save_error_info(sb, -errno, 0, 0, function, line);
 	ext4_handle_error(sb);
 }
 
@@ -714,7 +710,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
  */
 
 void __ext4_abort(struct super_block *sb, const char *function,
-		  unsigned int line, const char *fmt, ...)
+		  unsigned int line, int error, const char *fmt, ...)
 {
 	struct va_format vaf;
 	va_list args;
@@ -722,7 +718,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
 		return;
 
-	save_error_info(sb, function, line);
+	save_error_info(sb, error, 0, 0, function, line);
 	va_start(args, fmt);
 	vaf.fmt = fmt;
 	vaf.va = &args;
@@ -741,7 +737,6 @@ void __ext4_abort(struct super_block *sb, const char *function,
 		sb->s_flags |= SB_RDONLY;
 		if (EXT4_SB(sb)->s_journal)
 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
-		save_error_info(sb, function, line);
 	}
 	if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
 		if (EXT4_SB(sb)->s_journal &&
@@ -815,15 +810,12 @@ __acquires(bitlock)
 {
 	struct va_format vaf;
 	va_list args;
-	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
 		return;
 
 	trace_ext4_error(sb, function, line);
-	es->s_last_error_ino = cpu_to_le32(ino);
-	es->s_last_error_block = cpu_to_le64(block);
-	__save_error_info(sb, function, line);
+	__save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
 
 	if (ext4_error_ratelimit(sb)) {
 		va_start(args, fmt);
@@ -1024,17 +1016,22 @@ static void ext4_put_super(struct super_block *sb)
 
 	destroy_workqueue(sbi->rsv_conversion_wq);
 
+	/*
+	 * Unregister sysfs before destroying jbd2 journal.
+	 * Since we could still access attr_journal_task attribute via sysfs
+	 * path which could have sbi->s_journal->j_task as NULL
+	 */
+	ext4_unregister_sysfs(sb);
+
 	if (sbi->s_journal) {
 		aborted = is_journal_aborted(sbi->s_journal);
 		err = jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
 		if ((err < 0) && !aborted) {
-			ext4_set_errno(sb, -err);
-			ext4_abort(sb, "Couldn't clean up the journal");
+			ext4_abort(sb, -err, "Couldn't clean up the journal");
 		}
 	}
 
-	ext4_unregister_sysfs(sb);
 	ext4_es_unregister_shrinker(sbi);
 	del_timer_sync(&sbi->s_err_report);
 	ext4_release_system_zone(sb);
@@ -2180,6 +2177,14 @@ static int parse_options(char *options, struct super_block *sb,
 		}
 	}
 #endif
+	if (test_opt(sb, DIOREAD_NOLOCK)) {
+		int blocksize =
+			BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
+		if (blocksize < PAGE_SIZE)
+			ext4_msg(sb, KERN_WARNING, "Warning: mounting with an "
+				 "experimental mount option 'dioread_nolock' "
+				 "for blocksize < PAGE_SIZE");
+	}
 	return 1;
 }
 
@@ -3609,7 +3614,8 @@ int ext4_calculate_overhead(struct super_block *sb)
 	 */
 	if (sbi->s_journal && !sbi->journal_bdev)
 		overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
-	else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
+	else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
+		/* j_inum for internal journal is non-zero */
 		j_inode = ext4_get_journal_inode(sb, j_inum);
 		if (j_inode) {
 			j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
@@ -3785,7 +3791,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		set_opt(sb, NO_UID32);
 	/* xattr user namespace & acls are now defaulted on */
 	set_opt(sb, XATTR_USER);
-	set_opt(sb, DIOREAD_NOLOCK);
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	set_opt(sb, POSIX_ACL);
 #endif
@@ -3835,6 +3840,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
 	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+
+	if (blocksize == PAGE_SIZE)
+		set_opt(sb, DIOREAD_NOLOCK);
+
 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
 		ext4_msg(sb, KERN_ERR,
@@ -4157,7 +4166,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
 	    sbi->s_inodes_per_group > blocksize * 8) {
 		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
-			 sbi->s_blocks_per_group);
+			 sbi->s_inodes_per_group);
 		goto failed_mount;
 	}
 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
@@ -4286,9 +4295,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			       EXT4_BLOCKS_PER_GROUP(sb) - 1);
 	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
 	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
-		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
+		ext4_msg(sb, KERN_WARNING, "groups count too large: %llu "
 			 "(block count %llu, first data block %u, "
-			 "blocks per group %lu)", sbi->s_groups_count,
+			 "blocks per group %lu)", blocks_count,
 			 ext4_blocks_count(es),
 			 le32_to_cpu(es->s_first_data_block),
 			 EXT4_BLOCKS_PER_GROUP(sb));
@@ -5433,7 +5442,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	}
 
 	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
-		ext4_abort(sb, "Abort forced by user");
+		ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
 
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -5622,10 +5631,8 @@ static int ext4_statfs_project(struct super_block *sb,
 		return PTR_ERR(dquot);
 	spin_lock(&dquot->dq_dqb_lock);
 
-	limit = dquot->dq_dqb.dqb_bsoftlimit;
-	if (dquot->dq_dqb.dqb_bhardlimit &&
-	    (!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
-		limit = dquot->dq_dqb.dqb_bhardlimit;
+	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
+			     dquot->dq_dqb.dqb_bhardlimit);
 	limit >>= sb->s_blocksize_bits;
 
 	if (limit && buf->f_blocks > limit) {
@@ -5637,11 +5644,8 @@ static int ext4_statfs_project(struct super_block *sb,
 			 (buf->f_blocks - curblock) : 0;
 	}
 
-	limit = dquot->dq_dqb.dqb_isoftlimit;
-	if (dquot->dq_dqb.dqb_ihardlimit &&
-	    (!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
-		limit = dquot->dq_dqb.dqb_ihardlimit;
-
+	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
+			     dquot->dq_dqb.dqb_ihardlimit);
 	if (limit && buf->f_files > limit) {
 		buf->f_files = limit;
 		buf->f_ffree =

View File

@@ -245,7 +245,7 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
 					 bh->b_data);
 errout:
 	if (error)
-		__ext4_error_inode(inode, function, line, 0,
+		__ext4_error_inode(inode, function, line, 0, -error,
 				   "corrupted xattr block %llu",
 				   (unsigned long long) bh->b_blocknr);
 	else
@@ -269,7 +269,7 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
 	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
 errout:
 	if (error)
-		__ext4_error_inode(inode, function, line, 0,
+		__ext4_error_inode(inode, function, line, 0, -error,
 				   "corrupted in-inode xattr");
 	return error;
 }
@@ -2880,9 +2880,9 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
 		if (IS_ERR(bh)) {
 			error = PTR_ERR(bh);
 			if (error == -EIO) {
-				ext4_set_errno(inode->i_sb, EIO);
-				EXT4_ERROR_INODE(inode, "block %llu read error",
-						 EXT4_I(inode)->i_file_acl);
+				EXT4_ERROR_INODE_ERR(inode, EIO,
+						     "block %llu read error",
+						     EXT4_I(inode)->i_file_acl);
 			}
 			bh = NULL;
 			goto cleanup;

View File

@@ -48,7 +48,7 @@ struct ext4_xattr_entry {
 	__le32	e_value_inum;	/* inode in which the value is stored */
 	__le32	e_value_size;	/* size of attribute value */
 	__le32	e_hash;		/* hash value of name and value */
-	char	e_name[0];	/* attribute name */
+	char	e_name[];	/* attribute name */
 };
 
 #define EXT4_XATTR_PAD_BITS		2
@@ -118,7 +118,7 @@ struct ext4_xattr_ibody_find {
 
 struct ext4_xattr_inode_array {
 	unsigned int count;		/* # of used items in the array */
-	struct inode *inodes[0];
+	struct inode *inodes[];
 };
 
 extern const struct xattr_handler ext4_xattr_user_handler;

View File

@@ -997,9 +997,10 @@ restart_loop:
 		 * journalled data) we need to unmap buffer and clear
 		 * more bits. We also need to be careful about the check
 		 * because the data page mapping can get cleared under
-		 * out hands, which alse need not to clear more bits
-		 * because the page and buffers will be freed and can
-		 * never be reused once we are done with them.
+		 * our hands. Note that if mapping == NULL, we don't
+		 * need to make buffer unmapped because the page is
+		 * already detached from the mapping and buffers cannot
+		 * get reused.
 		 */
 		mapping = READ_ONCE(bh->b_page->mapping);
 		if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {