btrfs: qgroup: Refactor btrfs_qgroup_insert_dirty_extent()

Refactor btrfs_qgroup_insert_dirty_extent() into two functions:
1. btrfs_qgroup_insert_dirty_extent_nolock()
   Almost the same as the original code.
   Meant for delayed_ref use, where the delayed refs lock is already held.

   The return type is changed to int, since the caller never needs the
   returned pointer, only whether it has to free the allocated memory.

2. btrfs_qgroup_insert_dirty_extent()
   The more encapsulated version.

   It takes the delayed_refs lock, allocates the record, checks whether
   quota is enabled, and so on.

The original design kept the exported functions to a minimum, but as more
btrfs hacks get exposed, such as the path replacement done in balance, we
need to record dirty extents manually, so such functions have to be added.

Also add comments to both functions to inform developers how to keep
qgroup correct when doing such hacks.
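
As a purely illustrative sketch (not part of this patch), a caller would
pick between the two variants roughly as below; the wrapper functions are
hypothetical, only the btrfs_qgroup_insert_dirty_extent*() calls come from
this change:

  /*
   * Illustrative sketch only, not part of this patch.  The wrapper names
   * are hypothetical; only btrfs_qgroup_insert_dirty_extent() and
   * btrfs_qgroup_insert_dirty_extent_nolock() are introduced here.
   */
  #include <linux/slab.h>
  #include "ctree.h"
  #include "transaction.h"
  #include "delayed-ref.h"
  #include "qgroup.h"

  /*
   * Common case: let the encapsulated version handle the quota-enabled
   * check, the allocation and the delayed_refs lock.
   */
  static int record_dirty_extent(struct btrfs_trans_handle *trans,
                                 struct btrfs_fs_info *fs_info,
                                 u64 bytenr, u64 num_bytes)
  {
          return btrfs_qgroup_insert_dirty_extent(trans, fs_info, bytenr,
                                                  num_bytes, GFP_NOFS);
  }

  /*
   * Delayed-ref style path: the caller already holds delayed_refs->lock
   * and has pre-allocated and filled @record, so use the _nolock variant
   * and free the record if an entry for that bytenr already exists
   * (return value > 0).
   */
  static void record_dirty_extent_locked(struct btrfs_fs_info *fs_info,
                                         struct btrfs_delayed_ref_root *delayed_refs,
                                         struct btrfs_qgroup_extent_record *record)
  {
          if (btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
                                                      record))
                  kfree(record);
  }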

Cc: Mark Fasheh <mfasheh@suse.de>
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Reviewed-and-Tested-by: Goldwyn Rodrigues <rgoldwyn@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Signed-off-by: Chris Mason <clm@fb.com>
Author: Qu Wenruo, 2016-08-15 10:36:50 +08:00
Committer: Chris Mason
parent d06f23d6a9
commit cb93b52cc0
4 changed files with 71 additions and 47 deletions

fs/btrfs/delayed-ref.c

@@ -541,7 +541,6 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_head *existing;
         struct btrfs_delayed_ref_head *head_ref = NULL;
         struct btrfs_delayed_ref_root *delayed_refs;
-        struct btrfs_qgroup_extent_record *qexisting;
         int count_mod = 1;
         int must_insert_reserved = 0;
@@ -606,10 +605,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                 qrecord->num_bytes = num_bytes;
                 qrecord->old_roots = NULL;
 
-                qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
-                                                             delayed_refs,
-                                                             qrecord);
-                if (qexisting)
+                if(btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
+                                        delayed_refs, qrecord))
                         kfree(qrecord);
         }

fs/btrfs/extent-tree.c

@@ -8521,35 +8521,6 @@ reada:
         wc->reada_slot = slot;
 }
 
-/*
- * These may not be seen by the usual inc/dec ref code so we have to
- * add them here.
- */
-static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
-                                     struct btrfs_root *root, u64 bytenr,
-                                     u64 num_bytes)
-{
-        struct btrfs_qgroup_extent_record *qrecord;
-        struct btrfs_delayed_ref_root *delayed_refs;
-
-        qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
-        if (!qrecord)
-                return -ENOMEM;
-
-        qrecord->bytenr = bytenr;
-        qrecord->num_bytes = num_bytes;
-        qrecord->old_roots = NULL;
-
-        delayed_refs = &trans->transaction->delayed_refs;
-        spin_lock(&delayed_refs->lock);
-        if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
-                                             delayed_refs, qrecord))
-                kfree(qrecord);
-        spin_unlock(&delayed_refs->lock);
-
-        return 0;
-}
-
 static int account_leaf_items(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_buffer *eb)
@@ -8583,7 +8554,8 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
                 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
 
-                ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
+                ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
+                                bytenr, num_bytes, GFP_NOFS);
                 if (ret)
                         return ret;
         }
@@ -8732,8 +8704,9 @@ walk_down:
                         btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                         path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
 
-                        ret = record_one_subtree_extent(trans, root, child_bytenr,
-                                                        root->nodesize);
+                        ret = btrfs_qgroup_insert_dirty_extent(trans,
+                                        root->fs_info, child_bytenr,
+                                        root->nodesize, GFP_NOFS);
                         if (ret)
                                 goto out;
                 }

fs/btrfs/qgroup.c

@@ -1453,10 +1453,9 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
         return ret;
 }
 
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                 struct btrfs_delayed_ref_root *delayed_refs,
-                                 struct btrfs_qgroup_extent_record *record)
+int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
+                                struct btrfs_delayed_ref_root *delayed_refs,
+                                struct btrfs_qgroup_extent_record *record)
 {
         struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
         struct rb_node *parent_node = NULL;
@@ -1475,12 +1474,42 @@ btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
                 else if (bytenr > entry->bytenr)
                         p = &(*p)->rb_right;
                 else
-                        return entry;
+                        return 1;
         }
 
         rb_link_node(&record->node, parent_node, p);
         rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
-        return NULL;
+        return 0;
+}
+
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+                struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+                gfp_t gfp_flag)
+{
+        struct btrfs_qgroup_extent_record *record;
+        struct btrfs_delayed_ref_root *delayed_refs;
+        int ret;
+
+        if (!fs_info->quota_enabled || bytenr == 0 || num_bytes == 0)
+                return 0;
+        if (WARN_ON(trans == NULL))
+                return -EINVAL;
+        record = kmalloc(sizeof(*record), gfp_flag);
+        if (!record)
+                return -ENOMEM;
+
+        delayed_refs = &trans->transaction->delayed_refs;
+        record->bytenr = bytenr;
+        record->num_bytes = num_bytes;
+        record->old_roots = NULL;
+
+        spin_lock(&delayed_refs->lock);
+        ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
+                                                      record);
+        spin_unlock(&delayed_refs->lock);
+        if (ret > 0)
+                kfree(record);
+        return 0;
 }
 
 #define UPDATE_NEW	0
fs/btrfs/qgroup.h

@@ -64,10 +64,35 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
 struct btrfs_delayed_extent_op;
 int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
                                          struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record *
-btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
-                                 struct btrfs_delayed_ref_root *delayed_refs,
-                                 struct btrfs_qgroup_extent_record *record);
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at commit trans time.
+ *
+ * No lock version, caller must acquire delayed ref lock and allocate memory.
+ *
+ * Return 0 for success insert
+ * Return >0 for existing record, caller can free @record safely.
+ * Error is not possible
+ */
+int btrfs_qgroup_insert_dirty_extent_nolock(
+                struct btrfs_fs_info *fs_info,
+                struct btrfs_delayed_ref_root *delayed_refs,
+                struct btrfs_qgroup_extent_record *record);
+
+/*
+ * Insert one dirty extent record into @delayed_refs, informing qgroup to
+ * account that extent at commit trans time.
+ *
+ * Better encapsulated version.
+ *
+ * Return 0 if the operation is done.
+ * Return <0 for error, like memory allocation failure or invalid parameter
+ * (NULL trans)
+ */
+int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
+                struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
+                gfp_t gfp_flag);
+
 int
 btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info,