@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			ei->i_reserved_meta_blocks, used,
-			ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
@@ -1221,49 +1184,6 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;	/* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;	/* success */
@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 
@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-		 ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-		 ei->i_allocated_meta_blocks);
 	return;
 }
 
@@ -1620,13 +1510,6 @@ add_delayed:
 			retval = ret;
 			goto out_unlock;
 		}
-	} else {
-		ret = ext4_da_reserve_metadata(inode, iblock);
-		if (ret) {
-			/* not enough space to reserve */
-			retval = ret;
-			goto out_unlock;
-		}
 	}
 
 	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*