Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
Pull ext4 updates from Ted Ts'o:
 "Only bug fixes and cleanups for ext4 this merge window. Of note are
  fixes for the combination of the inline_data and fast_commit fixes,
  and more accurately calculating when to schedule additional lazy
  inode table init, especially when CONFIG_HZ is 100HZ"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix error code saved on super block during file system abort
  ext4: inline data inode fast commit replay fixes
  ext4: commit inline data during fast commit
  ext4: scope ret locally in ext4_try_to_trim_range()
  ext4: remove an unused variable warning with CONFIG_QUOTA=n
  ext4: fix boolreturn.cocci warnings in fs/ext4/name.c
  ext4: prevent getting empty inode buffer
  ext4: move ext4_fill_raw_inode() related functions
  ext4: factor out ext4_fill_raw_inode()
  ext4: prevent partial update of the extent blocks
  ext4: check for inconsistent extents between index and leaf block
  ext4: check for out-of-order index extents in ext4_valid_extent_entries()
  ext4: convert from atomic_t to refcount_t on ext4_io_end->count
  ext4: refresh the ext4_ext_path struct after dropping i_data_sem.
  ext4: ensure enough credits in ext4_ext_shift_path_extents
  ext4: correct the left/middle/right debug message for binsearch
  ext4: fix lazy initialization next schedule time computation in more granular unit
  Revert "ext4: enforce buffer head state assertion in ext4_da_map_blocks"
@@ -17,6 +17,7 @@
 #ifndef _EXT4_H
 #define _EXT4_H
 
+#include <linux/refcount.h>
 #include <linux/types.h>
 #include <linux/blkdev.h>
 #include <linux/magic.h>
@@ -241,7 +242,7 @@ typedef struct ext4_io_end {
     struct bio *bio;               /* Linked list of completed
                                     * bios covering the extent */
     unsigned int flag;             /* unwritten or not */
-    atomic_t count;                /* reference counter */
+    refcount_t count;              /* reference counter */
     struct list_head list_vec;     /* list of ext4_io_end_vec */
 } ext4_io_end_t;
 
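The two hunks above (most likely from fs/ext4/ext4.h) back the atomic_t to refcount_t conversion of ext4_io_end->count; the matching call-site changes appear in the page-io hunks further down. refcount_t warns and saturates on overflow/underflow instead of silently wrapping the way a raw atomic_t does. Below is a minimal userspace sketch of the get/put lifetime such a counter protects — struct io_end and its helpers are stand-ins invented for illustration, not ext4 code.

    #include <assert.h>
    #include <stdio.h>

    /* Userspace model of the get/put pattern used for ext4_io_end->count.
     * In the kernel, refcount_set/refcount_inc/refcount_dec_and_test replace
     * atomic_set/atomic_inc/atomic_dec_and_test and complain (saturate)
     * on misuse instead of silently wrapping. */
    struct io_end {
        unsigned int count;     /* stands in for refcount_t */
        int released;
    };

    static void io_end_init(struct io_end *io) { io->count = 1; io->released = 0; }
    static void io_end_get(struct io_end *io)  { assert(io->count > 0); io->count++; }

    static void io_end_put(struct io_end *io)
    {
        assert(io->count > 0);        /* refcount_dec_and_test() would WARN here */
        if (--io->count == 0)
            io->released = 1;         /* kernel: ext4_release_io_end() */
    }

    int main(void)
    {
        struct io_end io;

        io_end_init(&io);   /* refcount_set(&io_end->count, 1) at allocation */
        io_end_get(&io);    /* refcount_inc() in ext4_get_io_end() */
        io_end_put(&io);
        io_end_put(&io);
        printf("released=%d\n", io.released);   /* prints released=1 */
        return 0;
    }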
@@ -136,15 +136,25 @@ int ext4_datasem_ensure_credits(handle_t *handle, struct inode *inode,
 static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
                                struct ext4_ext_path *path)
 {
+    int err = 0;
+
     if (path->p_bh) {
         /* path points to block */
         BUFFER_TRACE(path->p_bh, "get_write_access");
-        return ext4_journal_get_write_access(handle, inode->i_sb,
+        err = ext4_journal_get_write_access(handle, inode->i_sb,
                                              path->p_bh, EXT4_JTR_NONE);
+        /*
+         * The extent buffer's verified bit will be set again in
+         * __ext4_ext_dirty(). We could leave an inconsistent
+         * buffer if the extents updating procudure break off du
+         * to some error happens, force to check it again.
+         */
+        if (!err)
+            clear_buffer_verified(path->p_bh);
     }
     /* path points to leaf/index in inode body */
     /* we use in-core data, no need to protect them */
-    return 0;
+    return err;
 }
 
 /*
@@ -165,6 +175,9 @@ static int __ext4_ext_dirty(const char *where, unsigned int line,
         /* path points to block */
         err = __ext4_handle_dirty_metadata(where, line, handle,
                                            inode, path->p_bh);
+        /* Extents updating done, re-set verified flag */
+        if (!err)
+            set_buffer_verified(path->p_bh);
     } else {
         /* path points to leaf/index in inode body */
         err = ext4_mark_inode_dirty(handle, inode);
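These two hunks ("ext4: prevent partial update of the extent blocks") pair clear_buffer_verified() in ext4_ext_get_access() with set_buffer_verified() in __ext4_ext_dirty(): if an extent-block update breaks off on an error before the block is marked dirty again, it stays unverified and is fully re-checked on the next read. The toy model below sketches that protocol under the assumption that only these two entry points toggle the bit; the names here are illustrative, not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model (not kernel code) of the verified-bit protocol: the bit is
     * cleared when write access is granted and only set again once the
     * update is committed, so an aborted update forces a re-check later. */
    struct ext_block { bool verified; };

    static void get_access(struct ext_block *b)          { b->verified = false; }
    static void dirty_done(struct ext_block *b, int err) { if (!err) b->verified = true; }
    static bool needs_check(const struct ext_block *b)   { return !b->verified; }

    int main(void)
    {
        struct ext_block b = { .verified = true };

        get_access(&b);                 /* start an update */
        dirty_done(&b, -5);             /* update broke off with an error */
        printf("recheck after failed update: %s\n", needs_check(&b) ? "yes" : "no");

        get_access(&b);
        dirty_done(&b, 0);              /* update completed */
        printf("recheck after clean update:  %s\n", needs_check(&b) ? "yes" : "no");
        return 0;
    }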
@@ -354,9 +367,13 @@ static int ext4_valid_extent_idx(struct inode *inode,
 
 static int ext4_valid_extent_entries(struct inode *inode,
                                      struct ext4_extent_header *eh,
-                                     ext4_fsblk_t *pblk, int depth)
+                                     ext4_lblk_t lblk, ext4_fsblk_t *pblk,
+                                     int depth)
 {
     unsigned short entries;
+    ext4_lblk_t lblock = 0;
+    ext4_lblk_t prev = 0;
+
     if (eh->eh_entries == 0)
         return 1;
 
@@ -365,31 +382,51 @@ static int ext4_valid_extent_entries(struct inode *inode,
     if (depth == 0) {
         /* leaf entries */
         struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
-        ext4_lblk_t lblock = 0;
-        ext4_lblk_t prev = 0;
-        int len = 0;
+
+        /*
+         * The logical block in the first entry should equal to
+         * the number in the index block.
+         */
+        if (depth != ext_depth(inode) &&
+            lblk != le32_to_cpu(ext->ee_block))
+            return 0;
         while (entries) {
             if (!ext4_valid_extent(inode, ext))
                 return 0;
 
             /* Check for overlapping extents */
             lblock = le32_to_cpu(ext->ee_block);
-            len = ext4_ext_get_actual_len(ext);
             if ((lblock <= prev) && prev) {
                 *pblk = ext4_ext_pblock(ext);
                 return 0;
             }
+            prev = lblock + ext4_ext_get_actual_len(ext) - 1;
             ext++;
             entries--;
-            prev = lblock + len - 1;
         }
     } else {
         struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+
+        /*
+         * The logical block in the first entry should equal to
+         * the number in the parent index block.
+         */
+        if (depth != ext_depth(inode) &&
+            lblk != le32_to_cpu(ext_idx->ei_block))
+            return 0;
         while (entries) {
             if (!ext4_valid_extent_idx(inode, ext_idx))
                 return 0;
+
+            /* Check for overlapping index extents */
+            lblock = le32_to_cpu(ext_idx->ei_block);
+            if ((lblock <= prev) && prev) {
+                *pblk = ext4_idx_pblock(ext_idx);
+                return 0;
+            }
             ext_idx++;
             entries--;
+            prev = lblock;
         }
     }
     return 1;
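With the extra lblk argument, ext4_valid_extent_entries() can now reject a child node whose first entry does not start at the logical block recorded in the parent index, on top of an ordering/overlap check that now also covers index nodes. The sketch below is a self-contained simplification of those two checks on a plain entry array; the types and the helper are illustrative, the real function operates on struct ext4_extent/ext4_extent_idx in little-endian form.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model (not the kernel function): the first entry of a
     * non-root node must start at the block recorded in the parent index,
     * and entries must be strictly ordered and non-overlapping. */
    struct entry { uint32_t lblk; uint32_t len; };

    static bool valid_entries(const struct entry *e, int n,
                              uint32_t parent_lblk, bool is_root)
    {
        uint32_t prev_end = 0;

        if (n == 0)
            return true;
        if (!is_root && e[0].lblk != parent_lblk)
            return false;                     /* index and child disagree */
        for (int i = 0; i < n; i++) {
            if (prev_end && e[i].lblk <= prev_end)
                return false;                 /* out of order or overlapping */
            prev_end = e[i].lblk + e[i].len - 1;
        }
        return true;
    }

    int main(void)
    {
        struct entry ok[]  = { {100, 8}, {200, 4} };
        struct entry bad[] = { {105, 8}, {200, 4} };   /* first entry != parent index */

        printf("%d %d\n",
               valid_entries(ok, 2, 100, false),
               valid_entries(bad, 2, 100, false));     /* prints 1 0 */
        return 0;
    }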
@@ -397,7 +434,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
 
 static int __ext4_ext_check(const char *function, unsigned int line,
                             struct inode *inode, struct ext4_extent_header *eh,
-                            int depth, ext4_fsblk_t pblk)
+                            int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
 {
     const char *error_msg;
     int max = 0, err = -EFSCORRUPTED;
@@ -423,7 +460,7 @@ static int __ext4_ext_check(const char *function, unsigned int line,
         error_msg = "invalid eh_entries";
         goto corrupted;
     }
-    if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
+    if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
         error_msg = "invalid extent entries";
         goto corrupted;
     }
@@ -453,7 +490,7 @@ corrupted:
 }
 
 #define ext4_ext_check(inode, eh, depth, pblk) \
-    __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
+    __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)
 
 int ext4_ext_check_inode(struct inode *inode)
 {
@@ -486,16 +523,18 @@ static void ext4_cache_extents(struct inode *inode,
 
 static struct buffer_head *
 __read_extent_tree_block(const char *function, unsigned int line,
-                         struct inode *inode, ext4_fsblk_t pblk, int depth,
-                         int flags)
+                         struct inode *inode, struct ext4_extent_idx *idx,
+                         int depth, int flags)
 {
     struct buffer_head *bh;
     int err;
     gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
+    ext4_fsblk_t pblk;
 
     if (flags & EXT4_EX_NOFAIL)
         gfp_flags |= __GFP_NOFAIL;
 
+    pblk = ext4_idx_pblock(idx);
     bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
     if (unlikely(!bh))
         return ERR_PTR(-ENOMEM);
@@ -508,8 +547,8 @@ __read_extent_tree_block(const char *function, unsigned int line,
     }
     if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
         return bh;
-    err = __ext4_ext_check(function, line, inode,
-                           ext_block_hdr(bh), depth, pblk);
+    err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
+                           depth, pblk, le32_to_cpu(idx->ei_block));
     if (err)
         goto errout;
     set_buffer_verified(bh);
@@ -527,8 +566,8 @@ errout:
 
 }
 
-#define read_extent_tree_block(inode, pblk, depth, flags) \
-    __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
+#define read_extent_tree_block(inode, idx, depth, flags) \
+    __read_extent_tree_block(__func__, __LINE__, (inode), (idx), \
                              (depth), (flags))
 
 /*
@@ -578,8 +617,7 @@ int ext4_ext_precache(struct inode *inode)
             i--;
             continue;
         }
-        bh = read_extent_tree_block(inode,
-                                    ext4_idx_pblock(path[i].p_idx++),
+        bh = read_extent_tree_block(inode, path[i].p_idx++,
                                     depth - i - 1,
                                     EXT4_EX_FORCE_CACHE);
         if (IS_ERR(bh)) {
@@ -714,13 +752,14 @@ ext4_ext_binsearch_idx(struct inode *inode,
     r = EXT_LAST_INDEX(eh);
     while (l <= r) {
         m = l + (r - l) / 2;
+        ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+                  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
+                  r, le32_to_cpu(r->ei_block));
+
         if (block < le32_to_cpu(m->ei_block))
             r = m - 1;
         else
             l = m + 1;
-        ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
-                  le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
-                  r, le32_to_cpu(r->ei_block));
     }
 
     path->p_idx = l - 1;
@@ -782,13 +821,14 @@ ext4_ext_binsearch(struct inode *inode,
 
     while (l <= r) {
         m = l + (r - l) / 2;
+        ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+                  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
+                  r, le32_to_cpu(r->ee_block));
+
         if (block < le32_to_cpu(m->ee_block))
             r = m - 1;
         else
             l = m + 1;
-        ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
-                  le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
-                  r, le32_to_cpu(r->ee_block));
     }
 
     path->p_ext = l - 1;
@@ -884,8 +924,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
         path[ppos].p_depth = i;
         path[ppos].p_ext = NULL;
 
-        bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
-                                    flags);
+        bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
         if (IS_ERR(bh)) {
             ret = PTR_ERR(bh);
             goto err;
@@ -1494,7 +1533,6 @@ static int ext4_ext_search_right(struct inode *inode,
     struct ext4_extent_header *eh;
     struct ext4_extent_idx *ix;
     struct ext4_extent *ex;
-    ext4_fsblk_t block;
     int depth;    /* Note, NOT eh_depth; depth from top of tree */
     int ee_len;
 
@@ -1561,20 +1599,17 @@ got_index:
      * follow it and find the closest allocated
      * block to the right */
     ix++;
-    block = ext4_idx_pblock(ix);
     while (++depth < path->p_depth) {
         /* subtract from p_depth to get proper eh_depth */
-        bh = read_extent_tree_block(inode, block,
-                                    path->p_depth - depth, 0);
+        bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
         if (IS_ERR(bh))
             return PTR_ERR(bh);
         eh = ext_block_hdr(bh);
         ix = EXT_FIRST_INDEX(eh);
-        block = ext4_idx_pblock(ix);
         put_bh(bh);
     }
 
-    bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
+    bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
     if (IS_ERR(bh))
         return PTR_ERR(bh);
     eh = ext_block_hdr(bh);
@@ -2953,9 +2988,9 @@ again:
             ext_debug(inode, "move to level %d (block %llu)\n",
                       i + 1, ext4_idx_pblock(path[i].p_idx));
             memset(path + i + 1, 0, sizeof(*path));
-            bh = read_extent_tree_block(inode,
-                ext4_idx_pblock(path[i].p_idx), depth - i - 1,
+            bh = read_extent_tree_block(inode, path[i].p_idx,
+                                        depth - i - 1,
                                         EXT4_EX_NOCACHE);
             if (IS_ERR(bh)) {
                 /* should we reset i_size? */
                 err = PTR_ERR(bh);
@@ -4977,36 +5012,6 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
     return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
 }
 
-/*
- * ext4_access_path:
- * Function to access the path buffer for marking it dirty.
- * It also checks if there are sufficient credits left in the journal handle
- * to update path.
- */
-static int
-ext4_access_path(handle_t *handle, struct inode *inode,
-                 struct ext4_ext_path *path)
-{
-    int credits, err;
-
-    if (!ext4_handle_valid(handle))
-        return 0;
-
-    /*
-     * Check if need to extend journal credits
-     * 3 for leaf, sb, and inode plus 2 (bmap and group
-     * descriptor) for each block group; assume two block
-     * groups
-     */
-    credits = ext4_writepage_trans_blocks(inode);
-    err = ext4_datasem_ensure_credits(handle, inode, 7, credits, 0);
-    if (err < 0)
-        return err;
-
-    err = ext4_ext_get_access(handle, inode, path);
-    return err;
-}
-
 /*
  * ext4_ext_shift_path_extents:
  * Shift the extents of a path structure lying between path[depth].p_ext
@@ -5021,6 +5026,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
     int depth, err = 0;
     struct ext4_extent *ex_start, *ex_last;
     bool update = false;
+    int credits, restart_credits;
     depth = path->p_depth;
 
     while (depth >= 0) {
@@ -5030,14 +5036,27 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
             return -EFSCORRUPTED;
 
         ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
+        /* leaf + sb + inode */
+        credits = 3;
+        if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr)) {
+            update = true;
+            /* extent tree + sb + inode */
+            credits = depth + 2;
+        }
 
-        err = ext4_access_path(handle, inode, path + depth);
+        restart_credits = ext4_writepage_trans_blocks(inode);
+        err = ext4_datasem_ensure_credits(handle, inode, credits,
+                                          restart_credits, 0);
+        if (err) {
+            if (err > 0)
+                err = -EAGAIN;
+            goto out;
+        }
+
+        err = ext4_ext_get_access(handle, inode, path + depth);
         if (err)
             goto out;
 
-        if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
-            update = true;
-
         while (ex_start <= ex_last) {
             if (SHIFT == SHIFT_LEFT) {
                 le32_add_cpu(&ex_start->ee_block,
@@ -5067,7 +5086,7 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
         }
 
         /* Update index too */
-        err = ext4_access_path(handle, inode, path + depth);
+        err = ext4_ext_get_access(handle, inode, path + depth);
         if (err)
             goto out;
 
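The hunks above drop ext4_access_path() and make ext4_ext_shift_path_extents() ask for exactly the credits it needs: 3 (leaf + sb + inode) for a plain leaf update, depth + 2 when the index chain must be updated as well. When ext4_datasem_ensure_credits() has to restart the transaction, the positive return is turned into -EAGAIN and, as the next group of hunks shows, the caller re-walks the extent tree from a saved iterator. A rough userspace sketch of that control flow follows; ensure_credits() and the constants are stand-ins, not the real ext4 helpers.

    #include <stdio.h>

    /* Stand-in return codes and helpers; the real functions live in ext4. */
    #define EAGAIN 11

    /* Pretend credit check: 0 if the handle already has enough credits,
     * 1 if it had to restart the transaction, <0 on error. */
    static int ensure_credits(int have, int need) { return have >= need ? 0 : 1; }

    static int shift_one_level(int have_credits, int depth, int updates_index)
    {
        /* leaf + sb + inode, or extent tree + sb + inode (see the hunk). */
        int credits = updates_index ? depth + 2 : 3;
        int err = ensure_credits(have_credits, credits);

        if (err) {
            if (err > 0)
                err = -EAGAIN;   /* caller drops the path and starts over */
            return err;
        }
        /* ... get write access and do the actual shift here ... */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", shift_one_level(8, 3, 1));   /* enough credits: 0 */
        printf("%d\n", shift_one_level(2, 3, 1));   /* restart needed: -11 (-EAGAIN) */
        return 0;
    }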
@@ -5106,6 +5125,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
     int ret = 0, depth;
     struct ext4_extent *extent;
     ext4_lblk_t stop, *iterator, ex_start, ex_end;
+    ext4_lblk_t tmp = EXT_MAX_BLOCKS;
 
     /* Let path point to the last extent */
     path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
@@ -5159,11 +5179,15 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
      * till we reach stop. In case of right shift, iterator points to stop
      * and it is decreased till we reach start.
      */
+again:
     if (SHIFT == SHIFT_LEFT)
         iterator = &start;
     else
         iterator = &stop;
 
+    if (tmp != EXT_MAX_BLOCKS)
+        *iterator = tmp;
+
     /*
      * Its safe to start updating extents. Start and stop are unsigned, so
      * in case of right shift if extent with 0 block is reached, iterator
@@ -5192,6 +5216,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
             }
         }
 
+        tmp = *iterator;
         if (SHIFT == SHIFT_LEFT) {
             extent = EXT_LAST_EXTENT(path[depth].p_hdr);
             *iterator = le32_to_cpu(extent->ee_block) +
@@ -5210,6 +5235,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
         }
         ret = ext4_ext_shift_path_extents(path, shift, inode,
                                           handle, SHIFT);
+        /* iterator can be NULL which means we should break */
+        if (ret == -EAGAIN)
+            goto again;
        if (ret)
            break;
     }
@@ -6043,6 +6071,9 @@ int ext4_ext_clear_bb(struct inode *inode)
     int j, ret = 0;
     struct ext4_map_blocks map;
 
+    if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
+        return 0;
+
     /* Determin the size of the file first */
     path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL,
                             EXT4_EX_NOCACHE);
@@ -819,7 +819,9 @@ static int ext4_fc_write_inode(struct inode *inode, u32 *crc)
     if (ret)
         return ret;
 
-    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE)
+    if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
+        inode_len = EXT4_INODE_SIZE(inode->i_sb);
+    else if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE)
         inode_len += ei->i_extra_isize;
 
     fc_inode.fc_ino = cpu_to_le32(inode->i_ino);
@@ -1524,7 +1526,8 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
      * crashing. This should be fixed but until then, we calculate
      * the number of blocks the inode.
      */
-    ext4_ext_replay_set_iblocks(inode);
+    if (!ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA))
+        ext4_ext_replay_set_iblocks(inode);
 
     inode->i_generation = le32_to_cpu(ext4_raw_inode(&iloc)->i_generation);
     ext4_reset_inode_seed(inode);
@@ -1842,6 +1845,10 @@ static void ext4_fc_set_bitmaps_and_counters(struct super_block *sb)
         }
         cur = 0;
         end = EXT_MAX_BLOCKS;
+        if (ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA)) {
+            iput(inode);
+            continue;
+        }
         while (cur < end) {
             map.m_lblk = cur;
             map.m_len = end - cur;
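The fast-commit hunks above (apparently from fs/ext4/fast_commit.c) make commit and replay aware of inline_data inodes: their file data lives in the inode body, so the full on-disk inode is written into the fast-commit area, and on replay there are no extents to walk and no block bitmaps to adjust. A minimal sketch of that decision; the flag value matches EXT4_INLINE_DATA_FL as far as I know, but the helper itself is illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch (not kernel code) of the replay-time decision: inline data
     * lives in the inode body, so there are no extents to replay. */
    static bool has_inline_data(unsigned int flags)
    {
        const unsigned int INLINE_DATA_FL = 0x10000000; /* assumed EXT4_INLINE_DATA_FL */
        return (flags & INLINE_DATA_FL) != 0;
    }

    int main(void)
    {
        unsigned int flags = 0x10000000;

        if (has_inline_data(flags))
            printf("skip extent replay and bitmap accounting\n");
        else
            printf("replay extents and recompute i_blocks\n");
        return 0;
    }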
fs/ext4/inode.c
@@ -1711,16 +1711,13 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
     }
 
     /*
-     * the buffer head associated with a delayed and not unwritten
-     * block found in the extent status cache must contain an
-     * invalid block number and have its BH_New and BH_Delay bits
-     * set, reflecting the state assigned when the block was
-     * initially delayed allocated
+     * Delayed extent could be allocated by fallocate.
+     * So we need to check it.
      */
-    if (ext4_es_is_delonly(&es)) {
-        BUG_ON(bh->b_blocknr != invalid_block);
-        BUG_ON(!buffer_new(bh));
-        BUG_ON(!buffer_delay(bh));
+    if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
+        map_bh(bh, inode->i_sb, invalid_block);
+        set_buffer_new(bh);
+        set_buffer_delay(bh);
         return 0;
     }
 
@@ -4234,14 +4231,161 @@ out_trace:
     return err;
 }
 
+static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
+{
+    if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+        return inode_peek_iversion_raw(inode);
+    else
+        return inode_peek_iversion(inode);
+}
+
+static int ext4_inode_blocks_set(struct ext4_inode *raw_inode,
+                                 struct ext4_inode_info *ei)
+{
+    struct inode *inode = &(ei->vfs_inode);
+    u64 i_blocks = READ_ONCE(inode->i_blocks);
+    struct super_block *sb = inode->i_sb;
+
+    if (i_blocks <= ~0U) {
+        /*
+         * i_blocks can be represented in a 32 bit variable
+         * as multiple of 512 bytes
+         */
+        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
+        raw_inode->i_blocks_high = 0;
+        ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+        return 0;
+    }
+
+    /*
+     * This should never happen since sb->s_maxbytes should not have
+     * allowed this, sb->s_maxbytes was set according to the huge_file
+     * feature in ext4_fill_super().
+     */
+    if (!ext4_has_feature_huge_file(sb))
+        return -EFSCORRUPTED;
+
+    if (i_blocks <= 0xffffffffffffULL) {
+        /*
+         * i_blocks can be represented in a 48 bit variable
+         * as multiple of 512 bytes
+         */
+        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
+        raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
+        ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+    } else {
+        ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
+        /* i_block is stored in file system block size */
+        i_blocks = i_blocks >> (inode->i_blkbits - 9);
+        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
+        raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
+    }
+    return 0;
+}
+
+static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode)
+{
+    struct ext4_inode_info *ei = EXT4_I(inode);
+    uid_t i_uid;
+    gid_t i_gid;
+    projid_t i_projid;
+    int block;
+    int err;
+
+    err = ext4_inode_blocks_set(raw_inode, ei);
+
+    raw_inode->i_mode = cpu_to_le16(inode->i_mode);
+    i_uid = i_uid_read(inode);
+    i_gid = i_gid_read(inode);
+    i_projid = from_kprojid(&init_user_ns, ei->i_projid);
+    if (!(test_opt(inode->i_sb, NO_UID32))) {
+        raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
+        raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
+        /*
+         * Fix up interoperability with old kernels. Otherwise,
+         * old inodes get re-used with the upper 16 bits of the
+         * uid/gid intact.
+         */
+        if (ei->i_dtime && list_empty(&ei->i_orphan)) {
+            raw_inode->i_uid_high = 0;
+            raw_inode->i_gid_high = 0;
+        } else {
+            raw_inode->i_uid_high =
+                cpu_to_le16(high_16_bits(i_uid));
+            raw_inode->i_gid_high =
+                cpu_to_le16(high_16_bits(i_gid));
+        }
+    } else {
+        raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
+        raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
+        raw_inode->i_uid_high = 0;
+        raw_inode->i_gid_high = 0;
+    }
+    raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+
+    EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+    EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+    EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+    EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+
+    raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+    raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
+    if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
+        raw_inode->i_file_acl_high =
+            cpu_to_le16(ei->i_file_acl >> 32);
+    raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+    ext4_isize_set(raw_inode, ei->i_disksize);
+
+    raw_inode->i_generation = cpu_to_le32(inode->i_generation);
+    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+        if (old_valid_dev(inode->i_rdev)) {
+            raw_inode->i_block[0] =
+                cpu_to_le32(old_encode_dev(inode->i_rdev));
+            raw_inode->i_block[1] = 0;
+        } else {
+            raw_inode->i_block[0] = 0;
+            raw_inode->i_block[1] =
+                cpu_to_le32(new_encode_dev(inode->i_rdev));
+            raw_inode->i_block[2] = 0;
+        }
+    } else if (!ext4_has_inline_data(inode)) {
+        for (block = 0; block < EXT4_N_BLOCKS; block++)
+            raw_inode->i_block[block] = ei->i_data[block];
+    }
+
+    if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+        u64 ivers = ext4_inode_peek_iversion(inode);
+
+        raw_inode->i_disk_version = cpu_to_le32(ivers);
+        if (ei->i_extra_isize) {
+            if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
+                raw_inode->i_version_hi =
+                    cpu_to_le32(ivers >> 32);
+            raw_inode->i_extra_isize =
+                cpu_to_le16(ei->i_extra_isize);
+        }
+    }
+
+    if (i_projid != EXT4_DEF_PROJID &&
+        !ext4_has_feature_project(inode->i_sb))
+        err = err ?: -EFSCORRUPTED;
+
+    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
+        EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
+        raw_inode->i_projid = cpu_to_le32(i_projid);
+
+    ext4_inode_csum_set(inode, raw_inode, ei);
+    return err;
+}
+
 /*
  * ext4_get_inode_loc returns with an extra refcount against the inode's
- * underlying buffer_head on success. If 'in_mem' is true, we have all
- * data in memory that is needed to recreate the on-disk version of this
- * inode.
+ * underlying buffer_head on success. If we pass 'inode' and it does not
+ * have in-inode xattr, we have all inode data in memory that is needed
+ * to recreate the on-disk version of this inode.
 */
 static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
-                                struct ext4_iloc *iloc, int in_mem,
+                                struct inode *inode, struct ext4_iloc *iloc,
                                 ext4_fsblk_t *ret_block)
 {
     struct ext4_group_desc *gdp;
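This hunk factors the copy-out of the in-memory inode into the on-disk struct ext4_inode into ext4_fill_raw_inode(), so that besides ext4_do_update_inode() (changed further down) the "all other inodes in the block are free, skip I/O" path of __ext4_get_inode_loc() can rebuild the buffer without reading it and still preserve the current inode's contents ("ext4: prevent getting empty inode buffer"). The snippet below is only a much-reduced illustration of that sharing pattern; the structs and fields are stand-ins, not the real ext4 layout.

    #include <stdio.h>
    #include <string.h>

    /* Illustration of the refactor only: one fill helper shared by the
     * regular update path and the "skip I/O, rebuild the buffer" path. */
    struct mem_inode { unsigned mode; unsigned long long blocks; };
    struct raw_inode { unsigned short mode; unsigned int blocks_lo; };

    static int fill_raw_inode(const struct mem_inode *in, struct raw_inode *raw)
    {
        if (in->blocks > 0xffffffffULL)
            return -1;                     /* would need the huge_file path */
        raw->mode = (unsigned short)in->mode;
        raw->blocks_lo = (unsigned int)in->blocks;
        return 0;
    }

    static int do_update_inode(const struct mem_inode *in, struct raw_inode *raw)
    {
        return fill_raw_inode(in, raw);    /* previously ~100 open-coded lines */
    }

    static void rebuild_block_without_io(const struct mem_inode *in,
                                         struct raw_inode *raw)
    {
        memset(raw, 0, sizeof(*raw));      /* all other inodes are free */
        fill_raw_inode(in, raw);           /* keep this inode's contents */
    }

    int main(void)
    {
        struct mem_inode in = { .mode = 0100644, .blocks = 8 };
        struct raw_inode raw;

        do_update_inode(&in, &raw);
        rebuild_block_without_io(&in, &raw);
        printf("mode=%o blocks=%u\n", raw.mode, raw.blocks_lo);
        return 0;
    }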
@@ -4287,7 +4431,7 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
      * is the only valid inode in the block, we need not read the
      * block.
      */
-    if (in_mem) {
+    if (inode && !ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
         struct buffer_head *bitmap_bh;
         int i, start;
 
@@ -4315,8 +4459,13 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
             }
             brelse(bitmap_bh);
             if (i == start + inodes_per_block) {
+                struct ext4_inode *raw_inode =
+                    (struct ext4_inode *) (bh->b_data + iloc->offset);
+
                 /* all other inodes are free, so skip I/O */
                 memset(bh->b_data, 0, bh->b_size);
+                if (!ext4_test_inode_state(inode, EXT4_STATE_NEW))
+                    ext4_fill_raw_inode(inode, raw_inode);
                 set_buffer_uptodate(bh);
                 unlock_buffer(bh);
                 goto has_buffer;
@@ -4377,7 +4526,7 @@ static int __ext4_get_inode_loc_noinmem(struct inode *inode,
     ext4_fsblk_t err_blk;
     int ret;
 
-    ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0,
+    ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, NULL, iloc,
                                &err_blk);
 
     if (ret == -EIO)
@@ -4392,9 +4541,8 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
     ext4_fsblk_t err_blk;
     int ret;
 
-    /* We have all inode data except xattrs in memory here. */
-    ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc,
-        !ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk);
+    ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, inode, iloc,
+                               &err_blk);
 
     if (ret == -EIO)
         ext4_error_inode_block(inode, err_blk, EIO,
@@ -4407,7 +4555,7 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
 int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
                           struct ext4_iloc *iloc)
 {
-    return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL);
+    return __ext4_get_inode_loc(sb, ino, NULL, iloc, NULL);
 }
 
 static bool ext4_should_enable_dax(struct inode *inode)
@@ -4528,13 +4676,6 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
     else
         inode_set_iversion_queried(inode, val);
 }
-static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
-{
-    if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
-        return inode_peek_iversion_raw(inode);
-    else
-        return inode_peek_iversion(inode);
-}
 
 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
                           ext4_iget_flags flags, const char *function,
@@ -4855,51 +4996,6 @@ bad_inode:
     return ERR_PTR(ret);
 }
 
-static int ext4_inode_blocks_set(handle_t *handle,
-                                 struct ext4_inode *raw_inode,
-                                 struct ext4_inode_info *ei)
-{
-    struct inode *inode = &(ei->vfs_inode);
-    u64 i_blocks = READ_ONCE(inode->i_blocks);
-    struct super_block *sb = inode->i_sb;
-
-    if (i_blocks <= ~0U) {
-        /*
-         * i_blocks can be represented in a 32 bit variable
-         * as multiple of 512 bytes
-         */
-        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
-        raw_inode->i_blocks_high = 0;
-        ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
-        return 0;
-    }
-
-    /*
-     * This should never happen since sb->s_maxbytes should not have
-     * allowed this, sb->s_maxbytes was set according to the huge_file
-     * feature in ext4_fill_super().
-     */
-    if (!ext4_has_feature_huge_file(sb))
-        return -EFSCORRUPTED;
-
-    if (i_blocks <= 0xffffffffffffULL) {
-        /*
-         * i_blocks can be represented in a 48 bit variable
-         * as multiple of 512 bytes
-         */
-        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
-        raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
-        ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
-    } else {
-        ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
-        /* i_block is stored in file system block size */
-        i_blocks = i_blocks >> (inode->i_blkbits - 9);
-        raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
-        raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
-    }
-    return 0;
-}
-
 static void __ext4_update_other_inode_time(struct super_block *sb,
                                            unsigned long orig_ino,
                                            unsigned long ino,
@@ -4975,11 +5071,8 @@ static int ext4_do_update_inode(handle_t *handle,
     struct ext4_inode_info *ei = EXT4_I(inode);
     struct buffer_head *bh = iloc->bh;
     struct super_block *sb = inode->i_sb;
-    int err = 0, block;
+    int err;
     int need_datasync = 0, set_large_file = 0;
-    uid_t i_uid;
-    gid_t i_gid;
-    projid_t i_projid;
 
     spin_lock(&ei->i_raw_lock);
 
@@ -4990,97 +5083,15 @@ static int ext4_do_update_inode(handle_t *handle,
     if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
         memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
 
-    err = ext4_inode_blocks_set(handle, raw_inode, ei);
-
-    raw_inode->i_mode = cpu_to_le16(inode->i_mode);
-    i_uid = i_uid_read(inode);
-    i_gid = i_gid_read(inode);
-    i_projid = from_kprojid(&init_user_ns, ei->i_projid);
-    if (!(test_opt(inode->i_sb, NO_UID32))) {
-        raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
-        raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
-        /*
-         * Fix up interoperability with old kernels. Otherwise,
-         * old inodes get re-used with the upper 16 bits of the
-         * uid/gid intact.
-         */
-        if (ei->i_dtime && list_empty(&ei->i_orphan)) {
-            raw_inode->i_uid_high = 0;
-            raw_inode->i_gid_high = 0;
-        } else {
-            raw_inode->i_uid_high =
-                cpu_to_le16(high_16_bits(i_uid));
-            raw_inode->i_gid_high =
-                cpu_to_le16(high_16_bits(i_gid));
-        }
-    } else {
-        raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
-        raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
-        raw_inode->i_uid_high = 0;
-        raw_inode->i_gid_high = 0;
-    }
-    raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
-
-    EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
-    EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
-    EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
-    EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
-
-    raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-    raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
-    if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
-        raw_inode->i_file_acl_high =
-            cpu_to_le16(ei->i_file_acl >> 32);
-    raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
-    if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
-        ext4_isize_set(raw_inode, ei->i_disksize);
+    if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode))
         need_datasync = 1;
-    }
     if (ei->i_disksize > 0x7fffffffULL) {
         if (!ext4_has_feature_large_file(sb) ||
-            EXT4_SB(sb)->s_es->s_rev_level ==
-            cpu_to_le32(EXT4_GOOD_OLD_REV))
+            EXT4_SB(sb)->s_es->s_rev_level == cpu_to_le32(EXT4_GOOD_OLD_REV))
             set_large_file = 1;
     }
-    raw_inode->i_generation = cpu_to_le32(inode->i_generation);
-    if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
-        if (old_valid_dev(inode->i_rdev)) {
-            raw_inode->i_block[0] =
-                cpu_to_le32(old_encode_dev(inode->i_rdev));
-            raw_inode->i_block[1] = 0;
-        } else {
-            raw_inode->i_block[0] = 0;
-            raw_inode->i_block[1] =
-                cpu_to_le32(new_encode_dev(inode->i_rdev));
-            raw_inode->i_block[2] = 0;
-        }
-    } else if (!ext4_has_inline_data(inode)) {
-        for (block = 0; block < EXT4_N_BLOCKS; block++)
-            raw_inode->i_block[block] = ei->i_data[block];
-    }
 
-    if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
-        u64 ivers = ext4_inode_peek_iversion(inode);
-
-        raw_inode->i_disk_version = cpu_to_le32(ivers);
-        if (ei->i_extra_isize) {
-            if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
-                raw_inode->i_version_hi =
-                    cpu_to_le32(ivers >> 32);
-            raw_inode->i_extra_isize =
-                cpu_to_le16(ei->i_extra_isize);
-        }
-    }
-
-    if (i_projid != EXT4_DEF_PROJID &&
-        !ext4_has_feature_project(inode->i_sb))
-        err = err ?: -EFSCORRUPTED;
-
-    if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
-        EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
-        raw_inode->i_projid = cpu_to_le32(i_projid);
-
-    ext4_inode_csum_set(inode, raw_inode, ei);
+    err = ext4_fill_raw_inode(inode, raw_inode);
     spin_unlock(&ei->i_raw_lock);
     if (err) {
         EXT4_ERROR_INODE(inode, "corrupted inode contents");
@@ -6299,7 +6299,6 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
 {
     ext4_grpblk_t next, count, free_count;
     void *bitmap;
-    int ret = 0;
 
     bitmap = e4b->bd_bitmap;
     start = (e4b->bd_info->bb_first_free > start) ?
@@ -6314,10 +6313,10 @@ __releases(ext4_group_lock_ptr(sb, e4b->bd_group))
         next = mb_find_next_bit(bitmap, max + 1, start);
 
         if ((next - start) >= minblocks) {
-            ret = ext4_trim_extent(sb, start, next - start, e4b);
+            int ret = ext4_trim_extent(sb, start, next - start, e4b);
+
             if (ret && ret != -EOPNOTSUPP)
                 break;
-            ret = 0;
             count += next - start;
         }
         free_count += next - start;
@@ -1439,7 +1439,7 @@ static bool ext4_match(struct inode *parent,
                 fname->hinfo.minor_hash !=
                     EXT4_DIRENT_MINOR_HASH(de)) {
 
-                return 0;
+                return false;
             }
         }
         return !ext4_ci_compare(parent, &cf, de->name,
@@ -279,14 +279,14 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
         io_end->inode = inode;
         INIT_LIST_HEAD(&io_end->list);
         INIT_LIST_HEAD(&io_end->list_vec);
-        atomic_set(&io_end->count, 1);
+        refcount_set(&io_end->count, 1);
     }
     return io_end;
 }
 
 void ext4_put_io_end_defer(ext4_io_end_t *io_end)
 {
-    if (atomic_dec_and_test(&io_end->count)) {
+    if (refcount_dec_and_test(&io_end->count)) {
         if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
             list_empty(&io_end->list_vec)) {
             ext4_release_io_end(io_end);
@@ -300,7 +300,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end)
 {
     int err = 0;
 
-    if (atomic_dec_and_test(&io_end->count)) {
+    if (refcount_dec_and_test(&io_end->count)) {
         if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
             err = ext4_convert_unwritten_io_end_vec(io_end->handle,
                                                     io_end);
@@ -314,7 +314,7 @@ int ext4_put_io_end(ext4_io_end_t *io_end)
 
 ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
 {
-    atomic_inc(&io_end->count);
+    refcount_inc(&io_end->count);
     return io_end;
 }
 
@@ -3270,9 +3270,9 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
     struct super_block *sb = elr->lr_super;
     ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
     ext4_group_t group = elr->lr_next_group;
-    unsigned long timeout = 0;
     unsigned int prefetch_ios = 0;
     int ret = 0;
+    u64 start_time;
 
     if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) {
         elr->lr_next_group = ext4_mb_prefetch(sb, group,
@@ -3309,14 +3309,13 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
             ret = 1;
 
     if (!ret) {
-        timeout = jiffies;
+        start_time = ktime_get_real_ns();
         ret = ext4_init_inode_table(sb, group,
                                     elr->lr_timeout ? 0 : 1);
         trace_ext4_lazy_itable_init(sb, group);
         if (elr->lr_timeout == 0) {
-            timeout = (jiffies - timeout) *
-                      EXT4_SB(elr->lr_super)->s_li_wait_mult;
-            elr->lr_timeout = timeout;
+            elr->lr_timeout = nsecs_to_jiffies((ktime_get_real_ns() - start_time) *
+                EXT4_SB(elr->lr_super)->s_li_wait_mult);
         }
         elr->lr_next_sched = jiffies + elr->lr_timeout;
         elr->lr_next_group = group + 1;
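This is the "more granular unit" fix from the pull message: the lazy itable-init thread used to measure how long one group took in jiffies, so with CONFIG_HZ=100 anything under 10 ms rounded down to zero and the next group was scheduled with no delay at all. Measuring in nanoseconds and converting once with nsecs_to_jiffies() keeps that information. A back-of-the-envelope model of the difference, assuming HZ=100 and the default s_li_wait_mult of 10:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long HZ = 100;                    /* CONFIG_HZ=100 case */
        const unsigned long wait_mult = 10;              /* assumed default s_li_wait_mult */
        const unsigned long long elapsed_ns = 4ULL * 1000 * 1000;   /* one group took 4 ms */

        /* Old scheme: elapsed time truncated to jiffies first, then scaled. */
        unsigned long old_jiffies = (unsigned long)(elapsed_ns * HZ / 1000000000ULL);
        unsigned long old_delay = old_jiffies * wait_mult;

        /* New scheme: scale in nanoseconds, convert to jiffies once. */
        unsigned long long new_delay_ns = elapsed_ns * wait_mult;
        unsigned long new_delay = (unsigned long)(new_delay_ns * HZ / 1000000000ULL);

        printf("old: %lu jiffies of delay before the next group\n", old_delay);   /* 0 */
        printf("new: %lu jiffies (~%llu ms)\n", new_delay, new_delay_ns / 1000000ULL); /* 4, 40 ms */
        return 0;
    }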
@@ -5734,10 +5733,10 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
     struct ext4_sb_info *sbi = EXT4_SB(sb);
     unsigned long old_sb_flags, vfs_flags;
     struct ext4_mount_options old_opts;
-    int enable_quota = 0;
     ext4_group_t g;
     int err = 0;
 #ifdef CONFIG_QUOTA
+    int enable_quota = 0;
     int i, j;
     char *to_free[EXT4_MAXQUOTAS];
 #endif
@@ -5828,7 +5827,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
     }
 
     if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
-        ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
+        ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
 
     sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
         (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -5942,7 +5941,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                 err = -EROFS;
                 goto restore_opts;
             }
+#ifdef CONFIG_QUOTA
             enable_quota = 1;
+#endif
         }
     }
 